{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,5]],"date-time":"2025-11-05T21:13:23Z","timestamp":1762377203553,"version":"3.37.3"},"reference-count":41,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2022,7,23]],"date-time":"2022-07-23T00:00:00Z","timestamp":1658534400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,7,23]],"date-time":"2022-07-23T00:00:00Z","timestamp":1658534400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62066047","61966037"],"award-info":[{"award-number":["62066047","61966037"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key project of Basic Research Program of Yunnan Province","award":["202101AS070031"],"award-info":[{"award-number":["202101AS070031"]}]},{"name":"General project of national Natural Science Foundation of China","award":["81771928"],"award-info":[{"award-number":["81771928"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2023,4]]},"DOI":"10.1007\/s10489-022-03952-z","type":"journal-article","created":{"date-parts":[[2022,7,23]],"date-time":"2022-07-23T10:03:10Z","timestamp":1658570590000},"page":"8114-8132","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":21,"title":["A robust infrared and visible image fusion framework via multi-receptive-field attention and color visual perception"],"prefix":"10.1007","volume":"53","author":[{"given":"Zhaisheng","family":"Ding","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6061-0949","authenticated-orcid":false,"given":"Haiyan","family":"Li","sequence":"additional","affiliation":[]},{"given":"Dongming","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Yanyu","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Ruichao","family":"Hou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,7,23]]},"reference":[{"key":"3952_CR1","doi-asserted-by":"publisher","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","volume":"45","author":"J Ma","year":"2019","unstructured":"Ma J, Ma Y, Li C (2019) Infrared and visible image fusion methods and applications: A survey. Inf Fusion 45:153\u2013178","journal-title":"Inf Fusion"},{"issue":"4","key":"3952_CR2","doi-asserted-by":"publisher","first-page":"498","DOI":"10.1109\/TMM.2015.2398195","volume":"17","author":"J Geng","year":"2015","unstructured":"Geng J, Miao Z, Zhang X (2015) Efficient heuristic methods for multimodal fusion and concept fusion in video concept detection. IEEE Trans Multimedia 17(4):498\u2013511","journal-title":"IEEE Trans Multimedia"},{"key":"3952_CR3","doi-asserted-by":"publisher","first-page":"101","DOI":"10.1016\/j.isprsjprs.2020.11.001","volume":"171","author":"FD Javan","year":"2021","unstructured":"Javan FD, Samadzadegan F, Mehravar S, Toosi A, Stein A (2021) A review of image fusion techniques for pan-sharpening of high-resolution satellite imagery. ISPRS J Photogramm Remote Sens 171:101\u2013117","journal-title":"ISPRS J Photogramm Remote Sens"},{"issue":"12","key":"3952_CR4","doi-asserted-by":"publisher","first-page":"2706","DOI":"10.1109\/TMM.2017.2711422","volume":"19","author":"H Hu","year":"2017","unstructured":"Hu H, Wu J, Li B, Guo Q, Zheng J (2017) An adaptive fusion algorithm for visible and infrared videos based on entropy and the cumulative distribution of gray levels. IEEE Trans Multimedia 19(12):2706\u20132719","journal-title":"IEEE Trans Multimedia"},{"issue":"13\u201314","key":"3952_CR5","doi-asserted-by":"publisher","first-page":"3032","DOI":"10.1016\/j.optcom.2012.02.064","volume":"285","author":"Q Zhang","year":"2012","unstructured":"Zhang Q, Wang L, Ma Z, Li H (2012) A novel video fusion framework using surfacelet transform. Opt Commun 285(13\u201314):3032\u20133041","journal-title":"Opt Commun"},{"issue":"9","key":"3952_CR6","doi-asserted-by":"publisher","first-page":"2485","DOI":"10.1016\/j.sigpro.2013.03.018","volume":"93","author":"Q Zhang","year":"2013","unstructured":"Zhang Q, Chen Y, Wang L (2013) Multisensor video fusion based on spatial\u2013temporal salience detection. Signal Process 93(9):2485\u20132499","journal-title":"Signal Process"},{"key":"3952_CR7","doi-asserted-by":"crossref","unstructured":"Bin S, Yingjie L, Rongguo F (2020) Multi-Band infrared and visual video registration and fusion parallel acceleration method. Presented at the Proceedings of the 2020 International conference on computing, Networks and Internet of Things, Sanya, China, 107-112","DOI":"10.1145\/3398329.3398344"},{"key":"3952_CR8","first-page":"1","volume":"70","author":"J Li","year":"2021","unstructured":"Li J, Huo H, Li C, Wang R, Sui C, Liu Z (2021) Multigrained attention network for infrared and visible image fusion. IEEE Trans Instrum Meas 70:1\u201312","journal-title":"IEEE Trans Instrum Meas"},{"key":"3952_CR9","doi-asserted-by":"publisher","first-page":"57","DOI":"10.1016\/j.inffus.2017.05.006","volume":"40","author":"Q Zhang","year":"2018","unstructured":"Zhang Q, Liu Y, Rick S (2018) Sparse representation based multi-sensor image fusion for multi-focus and multi-modality images: A review. Inf Fusion 40:57\u201375","journal-title":"Inf Fusion"},{"issue":"6","key":"3952_CR10","doi-asserted-by":"publisher","first-page":"1760","DOI":"10.1109\/JSEN.2016.2646741","volume":"PP","author":"X Luo","year":"2017","unstructured":"Luo X, Zhang Z, Zhang B, Wu X (2017) Image fusion with contextual statistical similarity and nonsubsampled shearlet transform. IEEE Sensors J PP(6):1760\u20131771","journal-title":"IEEE Sensors J"},{"key":"3952_CR11","doi-asserted-by":"crossref","unstructured":"Zhang TY, Zhou Q, Feng HJ, Xu ZH, Li Q, Chen YT (2013) Fusion of infrared and visible light images based on nonsubsampled shearlet transform. Proc SPIE 8907, id. 89071H, 8 pp","DOI":"10.1117\/12.2032470"},{"issue":"11","key":"3952_CR12","doi-asserted-by":"publisher","first-page":"3137","DOI":"10.1109\/TMM.2018.2823900","volume":"20","author":"Y Jiang","year":"2018","unstructured":"Jiang Y, Wu Z, Tang J, Li Z, Xue X, Chang S (2018) Modeling multimodal clues in a hybrid deep learning framework for video classification. IEEE Trans Multimedia 20(11):3137\u20133147","journal-title":"IEEE Trans Multimedia"},{"key":"3952_CR13","doi-asserted-by":"publisher","first-page":"640","DOI":"10.1109\/TCI.2020.2965304","volume":"6","author":"RC Hou","year":"2020","unstructured":"Hou RC, Zhou DM, Nie RC (2020) VIF-Net: An unsupervised framework for infrared and visible image fusion. IEEE Trans Comput Imaging 6:640\u2013651","journal-title":"IEEE Trans Comput Imaging"},{"key":"3952_CR14","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","volume":"48","author":"J Ma","year":"2019","unstructured":"Ma J, Yu W, Liang P et al (2019) FusionGAN: A generative adversarial network for infrared and visible image fusion. Information Fusion 48:11\u201326","journal-title":"Information Fusion"},{"key":"3952_CR15","first-page":"1","volume":"70","author":"J Ma","year":"2021","unstructured":"Ma J, Zhang H, Shao Z, Liang P, Xu H (2021) GANMcC: A generative adversarial network with multiclassification constraints for infrared and visible image fusion. IEEE Trans Instrum Meas 70:1\u201314","journal-title":"IEEE Trans Instrum Meas"},{"issue":"3","key":"3952_CR16","doi-asserted-by":"publisher","first-page":"1850018","DOI":"10.1142\/S0219691318500182","volume":"16","author":"Y Liu","year":"2018","unstructured":"Liu Y, Chen X, Cheng J, Peng H, Wang Z (2018) Infrared and visible image fusion with convolutional neural networks. Int J Wavelets Multiresolution Inf Process 16(3):1850018","journal-title":"Int J Wavelets Multiresolution Inf Process"},{"issue":"7","key":"3952_CR17","doi-asserted-by":"publisher","first-page":"1063","DOI":"10.1007\/s12046-017-0673-1","volume":"42","author":"AV Vanmali","year":"2017","unstructured":"Vanmali AV, Gadre VM (2017) Visible and NIR image fusion using weight-map-guided Laplacian\u2013Gaussian pyramid for improving scene visibility. S\u0101dhan\u0101 42(7):1063\u20131082","journal-title":"S\u0101dhan\u0101"},{"key":"3952_CR18","doi-asserted-by":"publisher","first-page":"72","DOI":"10.1016\/j.inffus.2021.02.023","volume":"73","author":"H Li","year":"2021","unstructured":"Li H, Wu XJ, Kittler J (2021) RFN-Nest: an end-to-end residual fusion network for infrared and visible images. Inf Fusion 73:72\u201386","journal-title":"Inf Fusion"},{"key":"3952_CR19","doi-asserted-by":"publisher","first-page":"103039","DOI":"10.1016\/j.infrared.2019.103039","volume":"102","author":"H Li","year":"2019","unstructured":"Li H, Wu XJ, Durrani TS (2019) Infrared and visible image fusion with ResNet and zero-phase component analysis. Infrared Phys Technol 102:103039","journal-title":"Infrared Phys Technol"},{"key":"3952_CR20","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang Z (2004) Image quality assessment: from error visibility to structural similarity. IEEE Trans Image Process 13:600\u2013612","journal-title":"IEEE Trans Image Process"},{"key":"3952_CR21","doi-asserted-by":"publisher","first-page":"1383","DOI":"10.1109\/TMM.2020.2997127","volume":"23","author":"J Li","year":"2021","unstructured":"Li J, Huo HT, Li C, Wang RH, Feng Q (2021) \"AttentionFGAN: infrared and visible image fusion using attention-based generative adversarial networks,\" (in English). IEEE Trans Multimedia 23:1383\u20131396","journal-title":"IEEE Trans Multimedia"},{"issue":"1","key":"3952_CR22","doi-asserted-by":"publisher","first-page":"59","DOI":"10.1111\/cgf.12671","volume":"35","author":"HS Faridul","year":"2016","unstructured":"Faridul HS, Pouli T, Chamaret C, Stauder J, Reinhard E, Kuzovkin D, Tremeau A (2016) Colour mapping: a review of recent methods, extensions and applications. Comput Graphics Forum 35(1):59\u201388","journal-title":"Comput Graphics Forum"},{"key":"3952_CR23","doi-asserted-by":"crossref","unstructured":"A-Monem ME, Hammood TZ (2020) Video colorization methods: a survey. Iraqi J Sci:675\u2013686","DOI":"10.24996\/ijs.2020.61.3.24"},{"issue":"2","key":"3952_CR24","doi-asserted-by":"publisher","first-page":"69","DOI":"10.1016\/j.inffus.2009.06.005","volume":"11","author":"MA Hogervorst","year":"2010","unstructured":"Hogervorst MA, Toet A (2010) Fast natural color mapping for night-time imagery. Inf Fusion 11(2):69\u201377","journal-title":"Inf Fusion"},{"key":"3952_CR25","doi-asserted-by":"crossref","unstructured":"Reinhard E, Pouli T (2011) Colour spaces for colour transfer. In: Computational Color Imaging - Third International Workshop, CCIW vol. 6626, pp. 1\u201315","DOI":"10.1007\/978-3-642-20404-3_1"},{"issue":"supplement 3","key":"3952_CR26","doi-asserted-by":"publisher","first-page":"S691","DOI":"10.1016\/j.hpb.2021.08.043","volume":"23","author":"C G\u00f3mez-Gavara","year":"2021","unstructured":"G\u00f3mez-Gavara C, Piella G, V\u00e1zquez J et al (2021) LIVERCOLOR: An Algorithm Quantification of Liver Graft Steatosis Using Machine Learning and Color Image Processing. HPB 23(supplement 3):S691\u2013S692","journal-title":"HPB"},{"key":"3952_CR27","doi-asserted-by":"crossref","unstructured":"Pavlovic R, Petrovic V (2012) Multisensor colour image fusion for night vision. Sensor Signal Processing for Defence, pp. 1\u20135","DOI":"10.1049\/ic.2012.0107"},{"key":"3952_CR28","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.dsp.2019.06.014","volume":"93","author":"L Florea","year":"2019","unstructured":"Florea L, Florea C (2019) Directed color transfer for low-light image enhancement. Digit Signal Process 93:1\u201312","journal-title":"Digit Signal Process"},{"key":"3952_CR29","doi-asserted-by":"publisher","first-page":"115664","DOI":"10.1016\/j.image.2019.115664","volume":"80","author":"Y Fang","year":"2020","unstructured":"Fang Y, Li Y, Tu X, Tan T, Wang X (2020) Face completion with hybrid dilated convolution. Signal Process Image Commun 80:115664","journal-title":"Signal Process Image Commun"},{"key":"3952_CR30","doi-asserted-by":"crossref","unstructured":"Wang P, Chen P, Yuan Y, Liu D, Cottrell G (2018) Understanding Convolution for Semantic Segmentation. In: 2018 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 1451\u20131460","DOI":"10.1109\/WACV.2018.00163"},{"key":"3952_CR31","doi-asserted-by":"publisher","first-page":"103387","DOI":"10.1016\/j.dsp.2022.103387","volume":"123","author":"Y Liu","year":"2022","unstructured":"Liu Y, Zhou D, Nie R, Ding Z, Guo Y, Ruan X, Xia W, Hou R (2022) TSE_Fuse: two stage enhancement method using attention mechanism and feature-linking model for infrared and visible image fusion. Digital Signal Process 123:103387","journal-title":"Digital Signal Process"},{"key":"3952_CR32","doi-asserted-by":"publisher","first-page":"4733","DOI":"10.1109\/TIP.2020.2975984","volume":"29","author":"H Li","year":"2020","unstructured":"Li H, Wu X-J, Kittler J (2020) MDLatLRR: A novel decomposition method for infrared and visible image fusion. IEEE Trans Image Process 29:4733\u20134746","journal-title":"IEEE Trans Image Process"},{"key":"3952_CR33","doi-asserted-by":"crossref","unstructured":"Li H, Wu XJ, Kittler J (2018) Infrared and visible image fusion using a deep learning framework. In: International Conference on Pattern Recognition, pp. 2705\u20132710","DOI":"10.1109\/ICPR.2018.8546006"},{"issue":"5","key":"3952_CR34","doi-asserted-by":"publisher","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","volume":"28","author":"H Li","year":"2019","unstructured":"Li H, Wu X (2019) DenseFuse: a fusion approach to infrared and visible images. IEEE Trans Image Process 28(5):2614\u20132623","journal-title":"IEEE Trans Image Process"},{"key":"3952_CR35","doi-asserted-by":"publisher","first-page":"103905","DOI":"10.1016\/j.infrared.2021.103905","volume":"118","author":"Z Ding","year":"2021","unstructured":"Ding Z, Li H, Zhou D, Li H, Liu Y, Hou R (2021) CMFA_Net: A cross-modal feature aggregation network for infrared-visible image fusion. Infrared Phys Technol 118:103905","journal-title":"Infrared Phys Technol"},{"key":"3952_CR36","unstructured":"Toet A (2014) TNO image fusion dataset. Figshare. Data. [Online]. Available:\u00a0https:\/\/figshare.com\/articles\/TNimageFusionDataset\/1008029. Accessed 26 Apr 2014"},{"key":"3952_CR37","unstructured":"INO video dataset. [Online]. Available: https:\/\/www.ino.ca\/en\/videoanalytics-dataset\/"},{"issue":"7","key":"3952_CR38","doi-asserted-by":"publisher","first-page":"313","DOI":"10.1049\/el:20020212","volume":"38","author":"G Qu","year":"2002","unstructured":"Qu G, Zhang D, Yan P (2002) Information measure for performance of image fusion. Electron Lett 38(7):313\u2013315","journal-title":"Electron Lett"},{"key":"3952_CR39","unstructured":"Wang Q, Shen Y (2004) Performances evaluation of image fusion techniques based on nonlinear correlation measurement. In: Proceedings of the 21st IEEE Instrumentation and Measurement Technology Conference (IEEE Cat. No.04CH37510)"},{"key":"3952_CR40","doi-asserted-by":"crossref","unstructured":"Kandadai S, Hardin J, Creusere CD (2008) Audio quality assessment using the mean structural similarity measure. In: IEEE international conference on acoustics","DOI":"10.1109\/ICASSP.2008.4517586"},{"issue":"12","key":"3952_CR41","doi-asserted-by":"publisher","first-page":"9645","DOI":"10.1109\/TIM.2020.3005230","volume":"69","author":"H Li","year":"2020","unstructured":"Li H, Wu XJ, Durrani T (2020) NestFuse: an infrared and visible image fusion architecture based on nest connection and spatial\/channel attention models. IEEE Trans Instrum Meas 69(12):9645\u20139656","journal-title":"IEEE Trans Instrum Meas"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-03952-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-022-03952-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-022-03952-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,3,16]],"date-time":"2023-03-16T02:23:22Z","timestamp":1678933402000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-022-03952-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,23]]},"references-count":41,"journal-issue":{"issue":"7","published-print":{"date-parts":[[2023,4]]}},"alternative-id":["3952"],"URL":"https:\/\/doi.org\/10.1007\/s10489-022-03952-z","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"type":"print","value":"0924-669X"},{"type":"electronic","value":"1573-7497"}],"subject":[],"published":{"date-parts":[[2022,7,23]]},"assertion":[{"value":"30 June 2022","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 July 2022","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}