{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,22]],"date-time":"2026-02-22T14:08:51Z","timestamp":1771769331975,"version":"3.50.1"},"reference-count":55,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2021,5,1]],"date-time":"2021-05-01T00:00:00Z","timestamp":1619827200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2021,5,1]],"date-time":"2021-05-01T00:00:00Z","timestamp":1619827200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Signal Processing"],"published-print":{"date-parts":[[2021,5]]},"DOI":"10.1016\/j.sigpro.2020.107936","type":"journal-article","created":{"date-parts":[[2020,12,9]],"date-time":"2020-12-09T01:00:32Z","timestamp":1607475632000},"page":"107936","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":80,"special_numbering":"C","title":["A saliency-based multiscale approach for infrared and visible image fusion"],"prefix":"10.1016","volume":"182","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9005-6849","authenticated-orcid":false,"given":"Jun","family":"Chen","sequence":"first","affiliation":[]},{"given":"Kangle","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Zhuo","family":"Cheng","sequence":"additional","affiliation":[]},{"given":"Linbo","family":"Luo","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.sigpro.2020.107936_bib0001","series-title":"IEEE Conference on Computer Vision and Pattern Recognition","first-page":"1597","article-title":"Frequency-tuned salient region detection","author":"Achanta","year":"2009"},{"key":"10.1016\/j.sigpro.2020.107936_bib0002","doi-asserted-by":"crossref","first-page":"39","DOI":"10.1016\/j.ins.2012.06.031","article-title":"Fusion of supervised and unsupervised learning for improved classification of hyperspectral images","volume":"217","author":"Alajlan","year":"2012","journal-title":"Inf. Sci."},{"key":"10.1016\/j.sigpro.2020.107936_bib0003","series-title":"International Conference on Information Fusion","first-page":"1","article-title":"Multi-sensor image fusion based on fourth order partial differential equations","author":"Bavirisetti","year":"2017"},{"issue":"4","key":"10.1016\/j.sigpro.2020.107936_bib0004","doi-asserted-by":"crossref","first-page":"671","DOI":"10.1109\/TCOM.1983.1095851","article-title":"The Laplacian pyramid as a compact image code","volume":"31","author":"Burt","year":"1983","journal-title":"IEEE Trans. Commun."},{"key":"10.1016\/j.sigpro.2020.107936_bib0005","doi-asserted-by":"crossref","first-page":"6724","DOI":"10.1109\/ACCESS.2017.2685178","article-title":"Image fusion using quaternion wavelet transform and multiple features","volume":"5","author":"Chai","year":"2017","journal-title":"IEEE Access"},{"issue":"5","key":"10.1016\/j.sigpro.2020.107936_bib0006","doi-asserted-by":"crossref","first-page":"1817","DOI":"10.1137\/040604297","article-title":"Aspects of total variation regularized L1 function approximation","volume":"65","author":"Chan","year":"2005","journal-title":"Siam J. Appl. Math."},{"key":"10.1016\/j.sigpro.2020.107936_bib0007","doi-asserted-by":"crossref","first-page":"64","DOI":"10.1016\/j.ins.2019.08.066","article-title":"Infrared and visible image fusion based on target-enhanced multiscale transform decomposition","volume":"508","author":"Chen","year":"2020","journal-title":"Inf. Sci."},{"key":"10.1016\/j.sigpro.2020.107936_bib0008","doi-asserted-by":"crossref","first-page":"93","DOI":"10.1016\/j.ins.2020.03.051","article-title":"Three-layer medical image fusion with tensor-based features","volume":"525","author":"Du","year":"2020","journal-title":"Inf. Sci."},{"issue":"6","key":"10.1016\/j.sigpro.2020.107936_bib0009","doi-asserted-by":"crossref","first-page":"523","DOI":"10.1504\/IJSPM.2018.095862","article-title":"Morphology-based visible-infrared image fusion framework for smart city","volume":"13","author":"Qi","year":"2018","journal-title":"Int. J. Simul. Process Modell."},{"key":"10.1016\/j.sigpro.2020.107936_bib0010","series-title":"IEEE Conference on Computer Vision and Pattern Recognition","first-page":"87","article-title":"LIME: A method for low-light image enhancement","author":"Guo","year":"2016"},{"key":"10.1016\/j.sigpro.2020.107936_bib0011","doi-asserted-by":"crossref","first-page":"103","DOI":"10.1016\/j.infrared.2014.04.003","article-title":"Novel fusion method for visible light and infrared images based on NSST-SF-PCNN","volume":"65","author":"Kong","year":"2014","journal-title":"Infrared Phys. Technol."},{"issue":"2","key":"10.1016\/j.sigpro.2020.107936_bib0012","doi-asserted-by":"crossref","first-page":"136","DOI":"10.1007\/s004260000024","article-title":"Phase congruency: a low-level image invariant","volume":"64","author":"Kovesi","year":"2000","journal-title":"Psychol. Res.-Psychologische Forschung"},{"issue":"2","key":"10.1016\/j.sigpro.2020.107936_bib0013","doi-asserted-by":"crossref","first-page":"119","DOI":"10.1016\/j.inffus.2005.09.006","article-title":"Pixel- and region-based image fusion with complex wavelets","volume":"8","author":"Lewis","year":"2007","journal-title":"Inf. Fusion"},{"issue":"2","key":"10.1016\/j.sigpro.2020.107936_bib0014","doi-asserted-by":"crossref","first-page":"119","DOI":"10.1016\/j.inffus.2005.09.006","article-title":"Pixel- and region-based image fusion with complex wavelets","volume":"8","author":"Lewis","year":"2007","journal-title":"Inf. Fusion"},{"issue":"5","key":"10.1016\/j.sigpro.2020.107936_bib0015","doi-asserted-by":"crossref","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","article-title":"DenseFuse: A fusion approach to infrared and visible images","volume":"28","author":"Li","year":"2019","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.sigpro.2020.107936_bib0016","doi-asserted-by":"crossref","first-page":"28","DOI":"10.1016\/j.ins.2020.04.035","article-title":"Infrared and visible image fusion using dual discriminators generative adversarial networks with wasserstein distance","volume":"529","author":"Li","year":"2020","journal-title":"Inf. Sci."},{"issue":"6","key":"10.1016\/j.sigpro.2020.107936_bib0017","doi-asserted-by":"crossref","first-page":"2828","DOI":"10.1109\/TIP.2018.2810539","article-title":"Structure-revealing low-light image enhancement via robust retinex model","volume":"27","author":"Li","year":"2018","journal-title":"IEEE Trans. Image Process."},{"issue":"7","key":"10.1016\/j.sigpro.2020.107936_bib0018","doi-asserted-by":"crossref","first-page":"2864","DOI":"10.1109\/TIP.2013.2244222","article-title":"Image fusion with guided filtering","volume":"22","author":"Li","year":"2013","journal-title":"IEEE Trans. Image Process."},{"issue":"2","key":"10.1016\/j.sigpro.2020.107936_bib0019","doi-asserted-by":"crossref","first-page":"74","DOI":"10.1016\/j.inffus.2010.03.002","article-title":"Performance comparison of different multi-resolution transforms for image fusion","volume":"12","author":"Li","year":"2011","journal-title":"Inf. Fusion"},{"issue":"12","key":"10.1016\/j.sigpro.2020.107936_bib0020","doi-asserted-by":"crossref","first-page":"3450","DOI":"10.1109\/TBME.2012.2217493","article-title":"Group-sparse representation with dictionary learning for medical image denoising and fusion","volume":"59","author":"Li","year":"2012","journal-title":"IEEE Trans. Biomed. Eng."},{"key":"10.1016\/j.sigpro.2020.107936_bib0021","series-title":"IEEE Visual Communications and Image Processing","first-page":"1","article-title":"LLCNN: A convolutional neural network for low-light image enhancement","author":"Li","year":"2017"},{"key":"10.1016\/j.sigpro.2020.107936_bib0022","series-title":"IEEE International Conference on Image Processing","first-page":"3730","article-title":"A low-light image enhancement method for both denoising and contrast enlarging","author":"Lin","year":"2015"},{"issue":"24","key":"10.1016\/j.sigpro.2020.107936_bib0023","doi-asserted-by":"crossref","first-page":"147","DOI":"10.1016\/j.inffus.2014.09.004","article-title":"A general framework for image fusion based on multi-scale transform and sparse representation","volume":"24","author":"Liu","year":"2015","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.sigpro.2020.107936_bib0024","doi-asserted-by":"crossref","first-page":"650","DOI":"10.1016\/j.patcog.2016.06.008","article-title":"LLNet: A deep autoencoder approach to natural low-light image enhancement","volume":"61","author":"Lore","year":"2017","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.sigpro.2020.107936_bib0025","doi-asserted-by":"crossref","first-page":"397","DOI":"10.1016\/j.infrared.2014.09.007","article-title":"The infrared and visible image fusion algorithm based on target separation and sparse representation","volume":"67","author":"Lu","year":"2014","journal-title":"Infrared Phys. Technol."},{"issue":"2","key":"10.1016\/j.sigpro.2020.107936_bib0026","doi-asserted-by":"crossref","first-page":"243","DOI":"10.1007\/s11704-014-2328-2","article-title":"Novel infrared and visible image fusion method based on independent component analysis","volume":"8","author":"Lu","year":"2014","journal-title":"Front. Comput. Sci. China"},{"issue":"31","key":"10.1016\/j.sigpro.2020.107936_bib0027","doi-asserted-by":"crossref","first-page":"100","DOI":"10.1016\/j.inffus.2016.02.001","article-title":"Infrared and visible image fusion via gradient transfer and total variation minimization","volume":"31","author":"Ma","year":"2016","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.sigpro.2020.107936_bib0028","doi-asserted-by":"crossref","first-page":"85","DOI":"10.1016\/j.inffus.2019.07.005","article-title":"Infrared and visible image fusion via detail preserving adversarial learning","volume":"54","author":"Ma","year":"2020","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.sigpro.2020.107936_bib0029","doi-asserted-by":"crossref","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","article-title":"Infrared and visible image fusion methods and applications: a survey","volume":"45","author":"Ma","year":"2019","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.sigpro.2020.107936_bib0030","doi-asserted-by":"crossref","first-page":"4980","DOI":"10.1109\/TIP.2020.2977573","article-title":"DDcGAN: A dual-discriminator conditional generative adversarial network for multi-resolution image fusion","volume":"29","author":"Ma","year":"2020","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.sigpro.2020.107936_bib0031","doi-asserted-by":"crossref","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","article-title":"FusionGAN: A generative adversarial network for infrared and visible image fusion","volume":"48","author":"Ma","year":"2019","journal-title":"Inf. Fusion"},{"key":"10.1016\/j.sigpro.2020.107936_bib0032","doi-asserted-by":"crossref","first-page":"103016","DOI":"10.1016\/j.cviu.2020.103016","article-title":"Infrared and visible image fusion via gradientlet filter","volume":"197","author":"Ma","year":"2020","journal-title":"Comput. Vis. Image Underst."},{"key":"10.1016\/j.sigpro.2020.107936_bib0033","doi-asserted-by":"crossref","first-page":"12","DOI":"10.1016\/j.neucom.2016.03.009","article-title":"Infrared and visible image fusion using total variation model","volume":"202","author":"Ma","year":"2016","journal-title":"Neurocomputing"},{"issue":"9","key":"10.1016\/j.sigpro.2020.107936_bib0034","doi-asserted-by":"crossref","first-page":"1762","DOI":"10.1109\/TPDS.2012.107","article-title":"Sensor data fusion algorithms for vehicular cyber-physical systems","volume":"23","author":"Miloslavov","year":"2012","journal-title":"IEEE Trans. Parallel Distrib.Syst."},{"key":"10.1016\/j.sigpro.2020.107936_bib0035","doi-asserted-by":"crossref","first-page":"22084","DOI":"10.1109\/ACCESS.2018.2812809","article-title":"Dual autoencoder network for retinex-based low-light image enhancement","volume":"6","author":"Park","year":"2018","journal-title":"IEEE Access"},{"issue":"7","key":"10.1016\/j.sigpro.2020.107936_bib0036","doi-asserted-by":"crossref","first-page":"313","DOI":"10.1049\/el:20020212","article-title":"Information measure for performance of image fusion","volume":"38","author":"Qu","year":"2002","journal-title":"Electron. Lett."},{"key":"10.1016\/j.sigpro.2020.107936_bib0037","series-title":"International Conference on Computer Science and Electronics Engineering","first-page":"2380","article-title":"Feature-level fusion of dual-band infrared images based on gradient pyramid decomposition","author":"Qu","year":"2013"},{"issue":"4","key":"10.1016\/j.sigpro.2020.107936_bib0038","doi-asserted-by":"crossref","first-page":"355","DOI":"10.1088\/0957-0233\/8\/4\/002","article-title":"In-fibre Bragg grating sensors","volume":"8","author":"Rao","year":"1997","journal-title":"Meas. Technol."},{"issue":"1","key":"10.1016\/j.sigpro.2020.107936_bib0039","doi-asserted-by":"crossref","first-page":"023522","DOI":"10.1117\/1.2945910","article-title":"Assessment of image fusion procedures using entropy, image quality, and multispectral classification","volume":"2","author":"Roberts","year":"2008","journal-title":"J. Appl. Remote Sens."},{"key":"10.1016\/j.sigpro.2020.107936_bib0040","series-title":"Proceedings of International Conference on Image Processing","first-page":"288","article-title":"Image sequence fusion using a shift-invariant wavelet transform","author":"Rockinger","year":"1997"},{"issue":"3","key":"10.1016\/j.sigpro.2020.107936_bib0041","doi-asserted-by":"crossref","first-page":"1041","DOI":"10.1016\/j.asoc.2011.11.020","article-title":"Infrared and visible image fusion using fuzzy logic and population-based optimization","volume":"12","author":"Saeedi","year":"2012","journal-title":"Appl. Soft Comput."},{"key":"10.1016\/j.sigpro.2020.107936_bib0042","doi-asserted-by":"crossref","first-page":"477","DOI":"10.1016\/j.infrared.2014.09.019","article-title":"Fusion method for infrared and visible images by using non-negative sparse representation","volume":"67","author":"Wang","year":"2014","journal-title":"Infrared Phys. Technol."},{"issue":"4","key":"10.1016\/j.sigpro.2020.107936_bib0043","doi-asserted-by":"crossref","first-page":"600","DOI":"10.1109\/TIP.2003.819861","article-title":"Image quality assessment: from error visibility to structural similarity","volume":"13","author":"Wang","year":"2004","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.sigpro.2020.107936_bib0044","doi-asserted-by":"crossref","first-page":"53","DOI":"10.1016\/j.infrared.2015.01.002","article-title":"A fusion algorithm for infrared and visible images based on adaptive dual-channel unit-linking PCNN in NSCT domain","volume":"69","author":"Xiang","year":"2015","journal-title":"Infrared Phys. Technol."},{"issue":"12","key":"10.1016\/j.sigpro.2020.107936_bib0045","doi-asserted-by":"crossref","first-page":"1135","DOI":"10.3390\/e21121135","article-title":"A novel infrared and visible image information fusion method based on phase congruency and image entropy","volume":"21","author":"Huang","year":"2019","journal-title":"Entropy"},{"key":"10.1016\/j.sigpro.2020.107936_bib0046","article-title":"U2Fusion: A unified unsupervised image fusion network","author":"Xu","year":"2020","journal-title":"IEEE Trans. Pattern Anal. Mach.Intell."},{"key":"10.1016\/j.sigpro.2020.107936_bib0047","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"12484","article-title":"FusionDN: A unified densely connected network for image fusion","author":"Xu","year":"2020"},{"issue":"2","key":"10.1016\/j.sigpro.2020.107936_bib0048","doi-asserted-by":"crossref","first-page":"146","DOI":"10.1016\/j.infrared.2009.10.007","article-title":"One color contrast enhanced infrared and visible image fusion method","volume":"53","author":"Yin","year":"2010","journal-title":"Infrared Phys. Technol."},{"issue":"2","key":"10.1016\/j.sigpro.2020.107936_bib0049","first-page":"57","article-title":"Infrared and visible images fusion method based on discrete wavelet transform","volume":"28","author":"Zhan","year":"2017","journal-title":"J. Comput."},{"key":"10.1016\/j.sigpro.2020.107936_bib0050","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"12797","article-title":"Rethinking the image fusion: a fast unified image fusion network based on proportional maintenance of gradient and intensity","author":"Zhang","year":"2020"},{"issue":"7","key":"10.1016\/j.sigpro.2020.107936_bib0051","doi-asserted-by":"crossref","first-page":"1334","DOI":"10.1016\/j.sigpro.2009.01.012","article-title":"Multifocus image fusion using the nonsubsampled contourlet transform","volume":"89","author":"Zhang","year":"2009","journal-title":"Signal Process."},{"issue":"40","key":"10.1016\/j.sigpro.2020.107936_bib0052","doi-asserted-by":"crossref","first-page":"57","DOI":"10.1016\/j.inffus.2017.05.006","article-title":"Sparse representation based multi-sensor image fusion for multi-focus and multi-modality images: a review","volume":"40","author":"Zhang","year":"2018","journal-title":"Inf. Fusion"},{"issue":"8","key":"10.1016\/j.sigpro.2020.107936_bib0053","doi-asserted-by":"crossref","first-page":"1400","DOI":"10.1364\/JOSAA.34.001400","article-title":"Infrared and visible image fusion via saliency analysis and local edge-preserving multi-scale decomposition","volume":"34","author":"Zhang","year":"2017","journal-title":"J. Opt. Soc. Am. A"},{"issue":"2","key":"10.1016\/j.sigpro.2020.107936_bib0054","first-page":"357","article-title":"Infrared and visible image fusion based on visual saliency and NSCT","volume":"46","author":"Zhi-Zhong","year":"2017","journal-title":"J. Univ. Electron. Sci.Technol. China"},{"key":"10.1016\/j.sigpro.2020.107936_bib0055","doi-asserted-by":"crossref","first-page":"516","DOI":"10.1016\/j.ins.2017.09.010","article-title":"A novel multi-modality image fusion method based on image decomposition and sparse representation","volume":"432","author":"Zhu","year":"2017","journal-title":"Inf. Sci."}],"container-title":["Signal Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0165168420304801?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0165168420304801?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,9,28]],"date-time":"2025-09-28T04:52:37Z","timestamp":1759035157000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0165168420304801"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,5]]},"references-count":55,"alternative-id":["S0165168420304801"],"URL":"https:\/\/doi.org\/10.1016\/j.sigpro.2020.107936","relation":{},"ISSN":["0165-1684"],"issn-type":[{"value":"0165-1684","type":"print"}],"subject":[],"published":{"date-parts":[[2021,5]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"A saliency-based multiscale approach for infrared and visible image fusion","name":"articletitle","label":"Article Title"},{"value":"Signal Processing","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.sigpro.2020.107936","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2020 Elsevier B.V. All rights reserved.","name":"copyright","label":"Copyright"}],"article-number":"107936"}}