{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T12:55:28Z","timestamp":1761396928458,"version":"3.28.0"},"reference-count":49,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2024,9,25]],"date-time":"2024-09-25T00:00:00Z","timestamp":1727222400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,25]],"date-time":"2024-09-25T00:00:00Z","timestamp":1727222400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1007\/s00530-024-01506-6","type":"journal-article","created":{"date-parts":[[2024,9,25]],"date-time":"2024-09-25T20:50:33Z","timestamp":1727297433000},"update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Multi-focus image fusion method based on adaptive weighting and interactive information modulation"],"prefix":"10.1007","volume":"30","author":[{"given":"Jinyuan","family":"Jiang","sequence":"first","affiliation":[]},{"given":"Hao","family":"Zhai","sequence":"additional","affiliation":[]},{"given":"You","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Xuan","family":"Xiao","sequence":"additional","affiliation":[]},{"given":"Xinbo","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,25]]},"reference":[{"issue":"10","key":"1506_CR1","doi-asserted-by":"crossref","first-page":"1362","DOI":"10.3390\/e23101362","volume":"23","author":"H Wan","year":"2021","unstructured":"Wan, H., Tang, X., Zhu, Z., Li, W.: 
Multi-focus image fusion method based on multi-scale decomposition of information complementary. Entropy 23(10), 1362 (2021)","journal-title":"Entropy"},{"issue":"3","key":"1506_CR2","doi-asserted-by":"crossref","first-page":"480","DOI":"10.1364\/JOSAA.35.000480","volume":"35","author":"J Sun","year":"2018","unstructured":"Sun, J., Han, Q., Kou, L., Zhang, L., Zhang, K., Jin, Z.: Multi-focus image fusion algorithm based on laplacian pyramids. JOSA A 35(3), 480\u2013490 (2018)","journal-title":"JOSA A"},{"key":"1506_CR3","doi-asserted-by":"crossref","unstructured":"Chen, Y., Liu, Y., Ward, R.K., Chen, X.: Multi-focus image fusion with complex sparse representation. IEEE Sensors Journal, 1\u20131 (2024)","DOI":"10.1109\/JSEN.2024.3411588"},{"key":"1506_CR4","volume":"92","author":"J Tan","year":"2021","unstructured":"Tan, J., Zhang, T., Zhao, L., Luo, X., Tang, Y.Y.: Multi-focus image fusion with geometrical sparse representation. Signal Processing: Image Communication 92, 116130 (2021)","journal-title":"Signal Processing: Image Communication"},{"key":"1506_CR5","first-page":"125","volume":"78","author":"X Ma","year":"2019","unstructured":"Ma, X., Hu, S., Liu, S., Fang, J., Xu, S.: Multi-focus image fusion based on joint sparse representation and optimum theory. Signal Processing: Image Communication 78, 125\u2013134 (2019)","journal-title":"Signal Processing: Image Communication"},{"key":"1506_CR6","doi-asserted-by":"crossref","DOI":"10.1016\/j.compeleceng.2021.107174","volume":"92","author":"Y Zhou","year":"2021","unstructured":"Zhou, Y., Yang, X., Zhang, R., Liu, K., Anisetti, M., Jeon, G.: Gradient-based multi-focus image fusion method using convolution neural network. 
Computers & Electrical Engineering 92, 107174 (2021)","journal-title":"Computers & Electrical Engineering"},{"key":"1506_CR7","doi-asserted-by":"crossref","first-page":"655","DOI":"10.1109\/TMM.2021.3057493","volume":"24","author":"J Chen","year":"2021","unstructured":"Chen, J., Li, X., Luo, L., Ma, J.: Multi-focus image fusion based on multi-scale gradients and image matting. IEEE Trans. Multimedia 24, 655\u2013667 (2021)","journal-title":"IEEE Trans. Multimedia"},{"issue":"6","key":"1506_CR8","doi-asserted-by":"crossref","first-page":"2888","DOI":"10.3390\/s23062888","volume":"23","author":"L Li","year":"2023","unstructured":"Li, L., Lv, M., Jia, Z., Ma, H.: Sparse representation-based multi-focus image fusion method via local energy in shearlet domain. Sensors 23(6), 2888 (2023)","journal-title":"Sensors"},{"key":"1506_CR9","volume":"189","author":"Y Wang","year":"2021","unstructured":"Wang, Y., Li, X., Zhu, R., Wang, Z., Feng, Y., Zhang, X.: A multi-focus image fusion framework based on multi-scale sparse representation in gradient domain. Signal Process. 189, 108254 (2021)","journal-title":"Signal Process."},{"key":"1506_CR10","volume":"189","author":"Y Wang","year":"2021","unstructured":"Wang, Y., Li, X., Zhu, R., Wang, Z., Feng, Y., Zhang, X.: A multi-focus image fusion framework based on multi-scale sparse representation in gradient domain. Signal Process. 189, 108254 (2021)","journal-title":"Signal Process."},{"key":"1506_CR11","doi-asserted-by":"crossref","first-page":"15750","DOI":"10.1109\/ACCESS.2017.2735019","volume":"5","author":"C Du","year":"2017","unstructured":"Du, C., Gao, S.: Image segmentation-based multi-focus image fusion through multi-scale convolutional neural network. 
IEEE Access 5, 15750\u201315761 (2017)","journal-title":"IEEE Access"},{"key":"1506_CR12","doi-asserted-by":"crossref","first-page":"9","DOI":"10.1016\/j.neucom.2019.01.048","volume":"335","author":"J Ma","year":"2019","unstructured":"Ma, J., Zhou, Z., Wang, B., Miao, L., Zong, H.: Multi-focus image fusion using boosted random walks-based algorithm with two-scale focus maps. Neurocomputing 335, 9\u201320 (2019)","journal-title":"Neurocomputing"},{"key":"1506_CR13","volume":"105","author":"J Wang","year":"2024","unstructured":"Wang, J., Qu, H., Zhang, Z., Xie, M.: New insights into multi-focus image fusion: A fusion method based on multi-dictionary linear sparse representation and region fusion model. Information Fusion 105, 102230 (2024)","journal-title":"Information Fusion"},{"key":"1506_CR14","doi-asserted-by":"crossref","first-page":"96","DOI":"10.1016\/j.inffus.2018.01.009","volume":"45","author":"MS Farid","year":"2019","unstructured":"Farid, M.S., Mahmood, A., Al-Maadeed, S.A.: Multi-focus image fusion using content adaptive blurring. Information fusion 45, 96\u2013112 (2019)","journal-title":"Information fusion"},{"issue":"4","key":"1506_CR15","doi-asserted-by":"crossref","first-page":"5182","DOI":"10.1364\/OE.21.005182","volume":"21","author":"L Chen","year":"2013","unstructured":"Chen, L., Li, J., Chen, C.P.: Regional multifocus image fusion using sparse representation. Opt. Express 21(4), 5182\u20135197 (2013)","journal-title":"Opt. Express"},{"key":"1506_CR16","doi-asserted-by":"crossref","first-page":"171","DOI":"10.1016\/j.sigpro.2016.01.014","volume":"125","author":"J Xiao","year":"2016","unstructured":"Xiao, J., Liu, T., Zhang, Y., Zou, B., Lei, J., Li, Q.: Multi-focus image fusion based on depth extraction with inhomogeneous diffusion equation. Signal Process. 
125, 171\u2013186 (2016)","journal-title":"Signal Process."},{"issue":"1","key":"1506_CR17","doi-asserted-by":"crossref","first-page":"40","DOI":"10.1007\/s11036-020-01719-9","volume":"26","author":"W Zhao","year":"2021","unstructured":"Zhao, W., Yang, H., Wang, J., Pan, X., Cao, Z.: Region-and pixel-level multi-focus image fusion through convolutional neural networks. Mobile Networks and Applications 26(1), 40\u201356 (2021)","journal-title":"Mobile Networks and Applications"},{"key":"1506_CR18","doi-asserted-by":"crossref","first-page":"139","DOI":"10.1016\/j.inffus.2014.05.004","volume":"23","author":"Y Liu","year":"2015","unstructured":"Liu, Y., Liu, S., Wang, Z.: Multi-focus image fusion with dense sift. Information Fusion 23, 139\u2013155 (2015)","journal-title":"Information Fusion"},{"key":"1506_CR19","doi-asserted-by":"crossref","first-page":"191","DOI":"10.1016\/j.inffus.2016.12.001","volume":"36","author":"Y Liu","year":"2017","unstructured":"Liu, Y., Chen, X., Peng, H., Wang, Z.: Multi-focus image fusion with a deep convolutional neural network. Information Fusion 36, 191\u2013207 (2017)","journal-title":"Information Fusion"},{"key":"1506_CR20","doi-asserted-by":"crossref","DOI":"10.1016\/j.jvcir.2022.103485","volume":"84","author":"K Bhalla","year":"2022","unstructured":"Bhalla, K., Koundal, D., Sharma, B., Hu, Y.-C., Zaguia, A.: A fuzzy convolutional neural network for enhancing multi-focus image fusion. J. Vis. Commun. Image Represent. 84, 103485 (2022)","journal-title":"J. Vis. Commun. Image Represent."},{"key":"1506_CR21","doi-asserted-by":"crossref","first-page":"4816","DOI":"10.1109\/TIP.2020.2976190","volume":"29","author":"J Li","year":"2020","unstructured":"Li, J., Guo, X., Lu, G., Zhang, B., Xu, Y., Wu, F., Zhang, D.: Drpl: Deep regression pair learning for multi-focus image fusion. IEEE Trans. Image Process. 29, 4816\u20134831 (2020)","journal-title":"IEEE Trans. 
Image Process."},{"key":"1506_CR22","doi-asserted-by":"crossref","first-page":"99","DOI":"10.1016\/j.inffus.2019.07.011","volume":"54","author":"Y Zhang","year":"2020","unstructured":"Zhang, Y., Liu, Y., Sun, P., Yan, H., Zhao, X., Zhang, L.: Ifcnn: A general image fusion framework based on convolutional neural network. Information Fusion 54, 99\u2013118 (2020)","journal-title":"Information Fusion"},{"issue":"21","key":"1506_CR23","doi-asserted-by":"crossref","first-page":"9755","DOI":"10.1109\/JSEN.2019.2928818","volume":"19","author":"H Li","year":"2019","unstructured":"Li, H., Nie, R., Cao, J., Guo, X., Zhou, D., He, K.: Multi-focus image fusion using u-shaped networks with a hybrid objective. IEEE Sens. J. 19(21), 9755\u20139765 (2019)","journal-title":"IEEE Sens. J."},{"key":"1506_CR24","unstructured":"Vaswani, A.: Attention is all you need. arXiv preprint arXiv:1706.03762 (2017)"},{"key":"1506_CR25","doi-asserted-by":"crossref","unstructured":"Samplawski, C., Marlin, B.M.: Towards transformer-based real-time object detection at the edge: A benchmarking study. In: MILCOM 2021-2021 IEEE Military Communications Conference (MILCOM), pp. 898\u2013903 (2021). IEEE","DOI":"10.1109\/MILCOM52596.2021.9653052"},{"key":"1506_CR26","doi-asserted-by":"crossref","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: European Conference on Computer Vision, pp. 213\u2013229 (2020). Springer","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"1506_CR27","doi-asserted-by":"crossref","unstructured":"Ke, Z., Qiu, D., Li, K., Yan, Q., Lau, R.W.: Guided collaborative training for pixel-wise semi-supervised learning. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XIII 16, pp. 429\u2013445 (2020). 
Springer","DOI":"10.1007\/978-3-030-58601-0_26"},{"key":"1506_CR28","volume":"76","author":"P Wu","year":"2023","unstructured":"Wu, P., Jiang, L., Hua, Z., Li, J.: Multi-focus image fusion: Transformer and shallow feature attention matters. Displays 76, 102353 (2023)","journal-title":"Displays"},{"issue":"7","key":"1506_CR29","doi-asserted-by":"crossref","first-page":"1200","DOI":"10.1109\/JAS.2022.105686","volume":"9","author":"J Ma","year":"2022","unstructured":"Ma, J., Tang, L., Fan, F., Huang, J., Mei, X., Ma, Y.: Swinfusion: Cross-domain long-range learning for general image fusion via swin transformer. IEEE\/CAA Journal of Automatica Sinica 9(7), 1200\u20131217 (2022)","journal-title":"IEEE\/CAA Journal of Automatica Sinica"},{"issue":"3","key":"1506_CR30","doi-asserted-by":"crossref","first-page":"234","DOI":"10.1006\/acha.2000.0343","volume":"10","author":"N Kingsbury","year":"2001","unstructured":"Kingsbury, N.: Complex wavelets for shift invariant analysis and filtering of signals. Appl. Comput. Harmon. Anal. 10(3), 234\u2013253 (2001)","journal-title":"Appl. Comput. Harmon. Anal."},{"key":"1506_CR31","unstructured":"Kingsbury, N.G.: The dual-tree complex wavelet transform: a new technique for shift invariance and directional filters. In: IEEE Digital Signal Processing Workshop, vol. 86, pp. 120\u2013131 (1998). Citeseer"},{"issue":"12","key":"1506_CR32","doi-asserted-by":"crossref","first-page":"22408","DOI":"10.3390\/s141222408","volume":"14","author":"Y Yang","year":"2014","unstructured":"Yang, Y., Tong, S., Huang, S., Lin, P.: Dual-tree complex wavelet transform and image block residual-based multi-focus image fusion in visual sensor networks. 
Sensors 14(12), 22408\u201322430 (2014)","journal-title":"Sensors"},{"issue":"6","key":"1506_CR33","doi-asserted-by":"crossref","first-page":"470","DOI":"10.1007\/s11801-018-8046-5","volume":"14","author":"M-X Yang","year":"2018","unstructured":"Yang, M.-X., Tang, G.-J., Liu, X.-H., Wang, L.-Q., Cui, Z.-G., Luo, S.-H.: Low-light image enhancement based on retinex theory and dual-tree complex wavelet transform. Optoelectron. Lett. 14(6), 470\u2013475 (2018)","journal-title":"Optoelectron. Lett."},{"key":"1506_CR34","doi-asserted-by":"crossref","first-page":"47303","DOI":"10.1109\/ACCESS.2019.2909788","volume":"7","author":"D Li","year":"2019","unstructured":"Li, D., Zhang, L., Sun, C., Yin, T., Liu, C., Yang, J.: Robust retinal image enhancement via dual-tree complex wavelet transform and morphology-based method. IEEE Access 7, 47303\u201347316 (2019)","journal-title":"IEEE Access"},{"key":"1506_CR35","doi-asserted-by":"crossref","unstructured":"Kiran, S.: Optimization of decomposition techniques for hybrid wavelet based image fusion algorithm using nsct and dtcwt. In: 2022 International Conference on Augmented Intelligence and Sustainable Systems (ICAISS), pp. 630\u2013636 (2022). IEEE","DOI":"10.1109\/ICAISS55157.2022.10010828"},{"key":"1506_CR36","doi-asserted-by":"crossref","first-page":"5576","DOI":"10.1007\/s00034-019-01131-z","volume":"38","author":"DP Bavirisetti","year":"2019","unstructured":"Bavirisetti, D.P., Xiao, G., Zhao, J., Dhuli, R., Liu, G.: Multi-scale guided image and video fusion: A fast and efficient approach. Circuits Systems Signal Process. 
38, 5576\u20135605 (2019)","journal-title":"Circuits Systems Signal Process."},{"key":"1506_CR37","doi-asserted-by":"crossref","first-page":"40","DOI":"10.1016\/j.inffus.2020.08.022","volume":"66","author":"H Zhang","year":"2021","unstructured":"Zhang, H., Le, Z., Shao, Z., Xu, H., Ma, J.: Mff-gan: An unsupervised generative adversarial network with adaptive and gradient joint constraints for multi-focus image fusion. Information Fusion 66, 40\u201353 (2021)","journal-title":"Information Fusion"},{"issue":"1","key":"1506_CR38","doi-asserted-by":"crossref","first-page":"502","DOI":"10.1109\/TPAMI.2020.3012548","volume":"44","author":"H Xu","year":"2020","unstructured":"Xu, H., Ma, J., Jiang, J., Guo, X., Ling, H.: U2fusion: A unified unsupervised image fusion network. IEEE Trans. Pattern Anal. Mach. Intell. 44(1), 502\u2013518 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1506_CR39","doi-asserted-by":"crossref","first-page":"5793","DOI":"10.1007\/s00521-020-05358-9","volume":"33","author":"B Ma","year":"2021","unstructured":"Ma, B., Zhu, Y., Yin, X., Ban, X., Huang, H., Mukeshimana, M.: Sesf-fuse: An unsupervised deep model for multi-focus image fusion. Neural Comput. Appl. 33, 5793\u20135804 (2021)","journal-title":"Neural Comput. Appl."},{"key":"1506_CR40","doi-asserted-by":"crossref","first-page":"127","DOI":"10.1016\/j.inffus.2022.11.014","volume":"92","author":"X Hu","year":"2023","unstructured":"Hu, X., Jiang, J., Liu, X., Ma, J.: Zmff: Zero-shot multi-focus image fusion. Information Fusion 92, 127\u2013138 (2023)","journal-title":"Information Fusion"},{"key":"1506_CR41","volume":"238","author":"M Li","year":"2024","unstructured":"Li, M., Pei, R., Zheng, T., Zhang, Y., Fu, W.: Fusiondiff: Multi-focus image fusion using denoising diffusion probabilistic models. Expert Syst. Appl. 238, 121664 (2024)","journal-title":"Expert Syst. 
Appl."},{"key":"1506_CR42","doi-asserted-by":"crossref","first-page":"199","DOI":"10.1016\/j.optcom.2014.12.032","volume":"341","author":"G Cui","year":"2015","unstructured":"Cui, G., Feng, H., Xu, Z., Li, Q., Chen, Y.: Detail preserved fusion of visible and infrared images using regional saliency extraction and multi-scale image decomposition. Optics Communications 341, 199\u2013209 (2015)","journal-title":"Optics Communications"},{"issue":"4","key":"1506_CR43","doi-asserted-by":"crossref","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. Image Process."},{"issue":"10","key":"1506_CR44","doi-asserted-by":"crossref","first-page":"1421","DOI":"10.1016\/j.imavis.2007.12.002","volume":"27","author":"Y Chen","year":"2009","unstructured":"Chen, Y., Blum, R.S.: A new automated quality assessment algorithm for image fusion. Image Vis. Comput. 27(10), 1421\u20131432 (2009)","journal-title":"Image Vis. Comput."},{"key":"1506_CR45","doi-asserted-by":"crossref","first-page":"469","DOI":"10.1016\/B978-0-12-372529-5.00017-2","volume":"19","author":"Q Wang","year":"2008","unstructured":"Wang, Q., Shen, Y., Jin, J.: Performance evaluation of image fusion techniques. Image fusion: algorithms and applications 19, 469\u2013492 (2008)","journal-title":"Image fusion: algorithms and applications"},{"issue":"4","key":"1506_CR46","doi-asserted-by":"crossref","first-page":"308","DOI":"10.1049\/el:20000267","volume":"36","author":"CS Xydeas","year":"2000","unstructured":"Xydeas, C.S., Petrovic, V., et al.: Objective image fusion performance measure. Electron. Lett. 36(4), 308\u2013309 (2000)","journal-title":"Electron. 
Lett."},{"issue":"2","key":"1506_CR47","doi-asserted-by":"crossref","first-page":"177","DOI":"10.1016\/j.inffus.2005.04.003","volume":"8","author":"Y Zheng","year":"2007","unstructured":"Zheng, Y., Essock, E.A., Hansen, B.C., Haun, A.M.: A new metric based on extended spatial frequency and its application to dwt based fusion algorithms. Information Fusion 8(2), 177\u2013192 (2007)","journal-title":"Information Fusion"},{"key":"1506_CR48","doi-asserted-by":"crossref","unstructured":"Hossny, M., Nahavandi, S., Creighton, D.: Comments on\u2019information measure for performance of image fusion\u2019 44(18) (2008)","DOI":"10.1049\/el:20081754"},{"issue":"2","key":"1506_CR49","doi-asserted-by":"crossref","first-page":"127","DOI":"10.1016\/j.inffus.2011.08.002","volume":"14","author":"Y Han","year":"2013","unstructured":"Han, Y., Cai, Y., Cao, Y., Xu, X.: A new image fusion performance metric based on visual information fidelity. Information fusion 14(2), 127\u2013135 (2013)","journal-title":"Information fusion"}],"container-title":["Multimedia 
Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01506-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01506-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01506-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T18:17:09Z","timestamp":1730139429000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01506-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,25]]},"references-count":49,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2024,10]]}},"alternative-id":["1506"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01506-6","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"type":"print","value":"0942-4962"},{"type":"electronic","value":"1432-1882"}],"subject":[],"published":{"date-parts":[[2024,9,25]]},"assertion":[{"value":"19 April 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"17 September 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 September 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflicts of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"290"}}