{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T08:08:29Z","timestamp":1774685309523,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":53,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819665938","type":"print"},{"value":"9789819665945","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,7,22]],"date-time":"2025-07-22T00:00:00Z","timestamp":1753142400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,7,22]],"date-time":"2025-07-22T00:00:00Z","timestamp":1753142400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-96-6594-5_9","type":"book-chapter","created":{"date-parts":[[2025,7,21]],"date-time":"2025-07-21T07:39:29Z","timestamp":1753083569000},"page":"106-119","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["XFusion: Cross-Attention Transformer for\u00a0Multi-focus Image Fusion"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0006-7494-1411","authenticated-orcid":false,"given":"Shouxi","family":"Zhao","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0000-6386-527X","authenticated-orcid":false,"given":"Tianren","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7955-0782","authenticated-orcid":false,"given":"Qin","family":"Zou","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9136-3656","authenticated-orcid":false,"given":"Chi","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9796-488X","authenticated-orcid":false,"given":"Zhongyuan","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,7,22]]},"reference":[{"key":"9_CR1","doi-asserted-by":"publisher","first-page":"201","DOI":"10.1016\/j.inffus.2019.02.003","volume":"51","author":"M Amin-Naji","year":"2019","unstructured":"Amin-Naji, M., Aghagolzadeh, A., Ezoji, M.: Ensemble of CNN for multi-focus image fusion. An Int. J. Inform. Fusion 51, 201\u2013214 (2019)","journal-title":"An Int. J. Inform. Fusion"},{"key":"9_CR2","doi-asserted-by":"publisher","first-page":"199","DOI":"10.1016\/j.optcom.2014.12.032","volume":"341","author":"G Cui","year":"2015","unstructured":"Cui, G., Feng, H., Xu, Z., Li, Q., Chen, Y.: Detail preserved fusion of visible and infrared images using regional saliency extraction and multi-scale image decomposition. Optics Commun. 341, 199\u2013209 (2015)","journal-title":"Optics Commun."},{"key":"9_CR3","doi-asserted-by":"publisher","first-page":"674","DOI":"10.1007\/978-3-031-19842-7_39","volume-title":"Computer Vision \u2013 ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XXXIX","author":"F Duffhauss","year":"2022","unstructured":"Duffhauss, F., Vien, N.A., Ziesche, H., Neumann, G.: FusionVAE: a deep hierarchical variational autoencoder for\u00a0RGB image fusion. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) 
Computer Vision \u2013 ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XXXIX, pp. 674\u2013691. Springer Nature Switzerland, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19842-7_39"},{"key":"9_CR4","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Houlsby, N.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"9_CR5","doi-asserted-by":"crossref","unstructured":"Zhao, Z., et al.: CDDFuse: correlation-driven dual-branch feature decomposition for multi-modality image fusion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.00572"},{"issue":"12","key":"9_CR6","doi-asserted-by":"publisher","first-page":"2959","DOI":"10.1109\/26.477498","volume":"43","author":"AM Eskicioglu","year":"1995","unstructured":"Eskicioglu, A.M., Fisher, P.S.: Image quality measures and their performance. IEEE Trans. Commun. 43(12), 2959\u20132965 (1995)","journal-title":"IEEE Trans. Commun."},{"key":"9_CR7","doi-asserted-by":"crossref","unstructured":"Godard, C., Mac Aodha, O., Firman, M., Brostow, G.J.: Digging into self-supervised monocular depth estimation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) (2018)","DOI":"10.1109\/ICCV.2019.00393"},{"issue":"8","key":"9_CR8","doi-asserted-by":"publisher","first-page":"1982","DOI":"10.1109\/TMM.2019.2895292","volume":"21","author":"X Guo","year":"2019","unstructured":"Guo, X., Nie, R., Cao, J., Zhou, D., Mei, L., He, K.: Fusegan: learning to fuse multi-focus image via conditional generative adversarial network. IEEE Trans. Multimedia 21(8), 1982\u20131996 (2019)","journal-title":"IEEE Trans. Multimedia"},{"key":"9_CR9","doi-asserted-by":"crossref","unstructured":"Haghighat, M., Razian, M.A.: Fast-FMI: Non-reference image fusion metric. IEEE International Conference on Application of Information AND Communication Technologies (2014)","DOI":"10.1109\/ICAICT.2014.7036000"},{"key":"9_CR10","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"9_CR11","doi-asserted-by":"publisher","first-page":"127","DOI":"10.1016\/j.inffus.2022.11.014","volume":"92","author":"X Hu","year":"2023","unstructured":"Hu, X., Jiang, J., Liu, X., Ma, J.: ZMFF: Zero-shot multi-focus image fusion. Inform. Fusion 92, 127\u2013138 (2023). https:\/\/doi.org\/10.1016\/j.inffus.2022.11.014","journal-title":"Inform. Fusion"},{"issue":"99","key":"9_CR12","doi-asserted-by":"publisher","first-page":"3845","DOI":"10.1109\/TIP.2020.2966075","volume":"29","author":"H Jung","year":"2020","unstructured":"Jung, H., Kim, Y., Jang, H., Ha, N., Sohn, K.: Unsupervised deep image fusion with structure tensor representations. IEEE Trans. Image Process. 29(99), 3845\u20133858 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR13","doi-asserted-by":"publisher","first-page":"114385","DOI":"10.1109\/ACCESS.2019.2935006","volume":"7","author":"R Lai","year":"2019","unstructured":"Lai, R., Li, Y., Guan, J., Xiong, A.: Multi-scale visual attention deep convolutional neural network for multi-focus image fusion. IEEE Access 7, 114385\u2013114399 (2019). 
https:\/\/doi.org\/10.1109\/ACCESS.2019.2935006","journal-title":"IEEE Access"},{"key":"9_CR14","doi-asserted-by":"publisher","first-page":"4816","DOI":"10.1109\/TIP.2020.2976190","volume":"29","author":"J Li","year":"2020","unstructured":"Li, J., et al.: DRPL: deep regression pair learning for multi-focus image fusion. IEEE Trans. Image Process. 29, 4816\u20134831 (2020). https:\/\/doi.org\/10.1109\/TIP.2020.2976190","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR15","doi-asserted-by":"publisher","first-page":"719","DOI":"10.1007\/978-3-031-19797-0_41","volume-title":"Computer Vision \u2013 ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XVIII","author":"P Liang","year":"2022","unstructured":"Liang, P., Jiang, J., Liu, X., Ma, J.: Fusion from\u00a0decomposition: a self-supervised decomposition approach for\u00a0image fusion. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision \u2013 ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XVIII, pp. 719\u2013735. Springer Nature Switzerland, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19797-0_41"},{"key":"9_CR16","doi-asserted-by":"publisher","unstructured":"Liu, J., Cao, F., Gao, X.-Z., Yu, L., Liang, J.: A cluster-weighted kernel k-means method for multi-view clustering. In: Proceedings of the AAAI Conference Artificial Intelligence, Vol. 34, No. 04, pp. 4860\u20134867 (2020). https:\/\/doi.org\/10.1609\/aaai.v34i04.5922","DOI":"10.1609\/aaai.v34i04.5922"},{"key":"9_CR17","doi-asserted-by":"publisher","DOI":"10.1016\/j.image.2021.116533","volume":"100","author":"S Liu","year":"2022","unstructured":"Liu, S., et al.: A multi-focus color image fusion algorithm based on low vision image reconstruction and focused feature extraction. Signal Process. Image Commun. 100, 116533 (2022)","journal-title":"Signal Process. Image Commun."},{"key":"9_CR18","doi-asserted-by":"publisher","first-page":"191","DOI":"10.1016\/j.inffus.2016.12.001","volume":"36","author":"Y Liu","year":"2017","unstructured":"Liu, Y., Chen, X., Peng, H., Wang, Z.: Multi-focus image fusion with a deep convolutional neural network. Inform. Fusion 36, 191\u2013207 (2017)","journal-title":"Inform. Fusion"},{"issue":"5","key":"9_CR19","first-page":"2614","volume":"28","author":"H Li","year":"2018","unstructured":"Li, H., Xiao-Jun, W.: Densefuse: a fusion approach to infrared and visible images. IEEE TIP 28(5), 2614\u20132623 (2018)","journal-title":"IEEE TIP"},{"issue":"13","key":"9_CR20","doi-asserted-by":"publisher","first-page":"20139","DOI":"10.1007\/s11042-022-14314-9","volume":"82","author":"H Liu","year":"2023","unstructured":"Liu, H., Yan, H.: An end-to-end multi-scale network based on autoencoder for infrared and visible image fusion. Multimedia Tools Appl. 82(13), 20139\u201320156 (2023)","journal-title":"Multimedia Tools Appl."},{"issue":"10","key":"9_CR21","doi-asserted-by":"publisher","first-page":"2761","DOI":"10.1007\/s11263-021-01501-8","volume":"129","author":"H Zhang","year":"2021","unstructured":"Zhang, H., Ma, J.: SDNet: a versatile squeeze-and decomposition network for real-time image fusion. Int. J. Comput. Vision 129(10), 2761\u20132785 (2021)","journal-title":"Int. J. Comput. Vision"},{"key":"9_CR22","doi-asserted-by":"crossref","unstructured":"Liu, R., Liu, Z., Liu, J. and Fan, X.: Searching a hierarchically aggregated fusion architecture for fast multimodality image fusion. 
In: Proceedings of the 29th ACM International Conference on Multimedia (2021)","DOI":"10.1145\/3474085.3475299"},{"key":"9_CR23","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.inffus.2022.06.001","volume":"86","author":"Y Liu","year":"2022","unstructured":"Liu, Y., Wang, L., Li, H., Chen, X.: Multi-focus image fusion with deep residual learning and focus property detection. Inform. Fusion 86, 1\u201316 (2022)","journal-title":"Inform. Fusion"},{"key":"9_CR24","doi-asserted-by":"crossref","unstructured":"Liu, Z.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"issue":"7","key":"9_CR25","doi-asserted-by":"publisher","first-page":"3312","DOI":"10.1109\/TIP.2019.2895768","volume":"28","author":"A Lucas","year":"2019","unstructured":"Lucas, A., Lopez-Tapiad, S., Molinae, R., Katsaggelos, A.K.: Generative adversarial networks and perceptual losses for video super-resolution. IEEE Trans. Image Process. 28(7), 3312\u20133327 (2019)","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR26","doi-asserted-by":"publisher","first-page":"1529","DOI":"10.1109\/TIP.2023.3242824","volume":"32","author":"J Luo","year":"2023","unstructured":"Luo, J., Ren, W., Gao, X., Cao, X.: Multi-exposure image fusion via deformable self-attention. IEEE Trans. Image Process. 32, 1529\u20131540 (2023)","journal-title":"IEEE Trans. Image Process."},{"issue":"11","key":"9_CR27","doi-asserted-by":"publisher","first-page":"5793","DOI":"10.1007\/s00521-020-05358-9","volume":"33","author":"B Ma","year":"2021","unstructured":"Ma, B., Zhu, Y., Yin, X., Ban, X., Huang, H., Mukeshimana, M.: SESF-Fuse: an unsupervised deep model for multi-focus image fusion. Neural Comput. Appl. 33(11), 5793\u20135804 (2021)","journal-title":"Neural Comput. Appl."},{"key":"9_CR28","doi-asserted-by":"publisher","first-page":"8668","DOI":"10.1109\/TIP.2020.3018261","volume":"29","author":"H Ma","year":"2020","unstructured":"Ma, H., Liao, Q., Zhang, J., Liu, S., Xue, J.H.: An a-matte boundary defocus model-based cascaded network for multi-focus image fusion. IEEE Trans. Image Process. 29, 8668\u20138679 (2020)","journal-title":"IEEE Trans. Image Process."},{"issue":"7","key":"9_CR29","doi-asserted-by":"publisher","first-page":"1200","DOI":"10.1109\/JAS.2022.105686","volume":"9","author":"J Ma","year":"2022","unstructured":"Ma, J., Tang, L., Fan, F., Huang, J., Mei, X., Ma, Y.: Swinfusion: cross-domain long-range learning for general image fusion via swin transformer. IEEE\/CAA J. Automatica Sinica 9(7), 1200\u20131217 (2022)","journal-title":"IEEE\/CAA J. Automatica Sinica"},{"key":"9_CR30","doi-asserted-by":"publisher","first-page":"153","DOI":"10.1007\/978-3-030-20912-4_15","volume-title":"Artificial Intelligence and Soft Computing: 18th International Conference, ICAISC 2019, Zakopane, Poland, June 16\u201320, 2019, Proceedings, Part I","author":"HT Mustafa","year":"2019","unstructured":"Mustafa, H.T., Liu, F., Yang, J., Khan, Z., Huang, Q.: Dense multi-focus fusion net: a deep unsupervised convolutional network for multi-focus image fusion. In: Rutkowski, L., Scherer, R., Korytkowski, M., Pedrycz, W., Tadeusiewicz, R., Zurada, J.M. (eds.) Artificial Intelligence and Soft Computing: 18th International Conference, ICAISC 2019, Zakopane, Poland, June 16\u201320, 2019, Proceedings, Part I, pp. 153\u2013163. Springer International Publishing, Cham (2019). 
https:\/\/doi.org\/10.1007\/978-3-030-20912-4_15"},{"key":"9_CR31","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1016\/j.imavis.2019.03.001","volume":"85","author":"HT Mustafa","year":"2019","unstructured":"Mustafa, H.T., Yang, J., Zareapoor, M.: Multi-scale convolutional neural network for multi-focus image fusion. Image Vis. Comput. 85, 26\u201335 (2019)","journal-title":"Image Vis. Comput."},{"key":"9_CR32","doi-asserted-by":"publisher","DOI":"10.1016\/j.image.2020.115864","volume":"85","author":"HT Mustafa","year":"2020","unstructured":"Mustafa, H.T., Zareapoor, M., Yang, J.: Mldnet: multi-level dense network for multi-focus image fusion. Signal Process. Image Commun. 85, 115864 (2020)","journal-title":"Signal Process. Image Commun."},{"issue":"7","key":"9_CR33","doi-asserted-by":"publisher","first-page":"313","DOI":"10.1049\/el:20020212","volume":"38","author":"G Qu","year":"2002","unstructured":"Qu, G., Zhang, D., Yan, P.: Information measure for performance of image fusion. Electron. Lett. 38(7), 313\u2013315 (2002)","journal-title":"Electron. Lett."},{"key":"9_CR34","first-page":"52","volume":"2","author":"B Rajalingam","year":"2018","unstructured":"Rajalingam, B., Priya, R.: Hybrid multimodality medical image fusion technique for feature enhancement in medical diagnosis. Int. J. Eng. Sci. Invent. 2, 52\u201360 (2018)","journal-title":"Int. J. Eng. Sci. Invent."},{"key":"9_CR35","doi-asserted-by":"publisher","first-page":"1125","DOI":"10.1007\/s11760-012-0361-x","volume":"7","author":"S Kumar","year":"2013","unstructured":"Kumar, S.: Multifocus and multispectral image fusion based on pixel significance using discrete cosine harmonic wavelet transform. SIViP 7, 1125\u20131143 (2013)","journal-title":"SIViP"},{"key":"9_CR36","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances In Neural Information Processing Systems (2017)"},{"key":"9_CR37","unstructured":"Vibashan, V., Valanarasu, J., Oza, P., Patel, V.: Image fusion transformer. In: IEEE International Conference on Image Processing (ICIP) (2022)"},{"issue":"4","key":"9_CR38","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR39","doi-asserted-by":"crossref","unstructured":"Xiao, B., Wu, H., Bi, X.: DTMNet: a discrete tchebichef moments-based deep neural network for multi-focus image fusion. In: International Conference on Computer Vision (2021)","DOI":"10.1109\/ICCV48922.2021.00011"},{"issue":"6","key":"9_CR40","doi-asserted-by":"publisher","first-page":"2627","DOI":"10.1109\/TCSVT.2022.3229691","volume":"33","author":"Z Wang","year":"2023","unstructured":"Wang, Z., et al.: VSP-Fuse: multifocus image fusion model using the knowledge transferred from visual salience priors. IEEE Trans. Circ. Syst. Video Technol. 33(6), 2627\u20132641 (2023). https:\/\/doi.org\/10.1109\/TCSVT.2022.3229691","journal-title":"IEEE Trans. Circ. Syst. Video Technol."},{"issue":"1","key":"9_CR41","doi-asserted-by":"publisher","first-page":"502","DOI":"10.1109\/TPAMI.2020.3012548","volume":"44","author":"H Xu","year":"2020","unstructured":"Xu, H., Ma, J., Jiang, J., Guo, X., Ling, H.: U2fusion: a unified unsupervised image fusion network. IEEE Trans. Pattern Anal. Mach. Intell. 
44(1), 502\u2013518 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"9_CR42","doi-asserted-by":"publisher","unstructured":"Xu, H., Ma, J., Le, Z., Jiang, J., Guo, X.: FusionDN: a unified densely connected network for image fusion. In: Proceedings of the AAAI Conference Artificial Intelligence,Vol. 34, No. 07, pp. 12484\u201312491 (2020). https:\/\/doi.org\/10.1609\/aaai.v34i07.6936","DOI":"10.1609\/aaai.v34i07.6936"},{"key":"9_CR43","unstructured":"Xu, S., Wei, X., Zhang, C., Liu, J., Zhang, J.: MFFW: A new dataset for multi-focus image fusion. arXiv preprintarXiv:2002.04780 (2020)"},{"issue":"22","key":"9_CR44","doi-asserted-by":"publisher","first-page":"6647","DOI":"10.3390\/s20226647","volume":"20","author":"X Yan","year":"2020","unstructured":"Yan, X., Gilani, S.Z., Qin, H., Mian, A.: Structural similarity loss for learning to fuse multi-focus images. Sensors 20(22), 6647 (2020)","journal-title":"Sensors"},{"key":"9_CR45","doi-asserted-by":"crossref","unstructured":"Zamir, S., Arora, A., Khan, S., Hayat, M., Khan, F., Yang, M.: Restormer: efficient transformer for high-resolution image restoration. In: Proceedings of the IEEE\/CVF Conference on Computer Vision And Pattern Recognition (2022)","DOI":"10.1109\/CVPR52688.2022.00564"},{"key":"9_CR46","doi-asserted-by":"publisher","first-page":"127","DOI":"10.1016\/j.inffus.2022.11.014","volume":"92","author":"X Hu","year":"2023","unstructured":"Hu, X., Jiang, J., Liu, X., Ma, J.: ZMFF: zero-shot multi-focus image fusion. Inform. Fusion 92, 127\u2013138 (2023)","journal-title":"Inform. Fusion"},{"issue":"1","key":"9_CR47","doi-asserted-by":"publisher","first-page":"40","DOI":"10.1016\/j.inffus.2020.08.022","volume":"66","author":"H Zhang","year":"2021","unstructured":"Zhang, H.: MFF-Gan: an unsupervised generative adversarial network with adaptive and gradient joint constraints for multi-focus image fusion. Inform. Fusion 66(1), 40\u201353 (2021)","journal-title":"Inform. Fusion"},{"key":"9_CR48","doi-asserted-by":"publisher","unstructured":"Zhang, H., Xu, H., Xiao, Y., Guo, X., Ma, J.: Rethinking the image fusion: a fast unified image fusion network based on proportional maintenance of gradient and intensity. In: Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 34, No. 07, pp. 12797\u201312804 (2020). https:\/\/doi.org\/10.1609\/aaai.v34i07.6975","DOI":"10.1609\/aaai.v34i07.6975"},{"issue":"9","key":"9_CR49","first-page":"4819","volume":"44","author":"X Zhang","year":"2021","unstructured":"Zhang, X.: Deep learning-based multi-focus image fusion: a survey and a comparative study. IEEE Trans. Pattern Anal. Mach. Intell. 44(9), 4819\u20134838 (2021)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"9_CR50","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1016\/j.inffus.2019.07.011","volume":"54","author":"Y Zhang","year":"2020","unstructured":"Zhang, Y., Liu, Y., Sun, P., Yan, H., Zhao, X., Zhang, L.: IFCNN: a general image fusion framework based on convolutional neural network. Inform. Fusion 54, 99\u2013118 (2020)","journal-title":"Inform. Fusion"},{"issue":"4","key":"9_CR51","doi-asserted-by":"publisher","first-page":"1102","DOI":"10.1109\/TCSVT.2018.2821177","volume":"29","author":"W Zhao","year":"2018","unstructured":"Zhao, W., Wang, D., Lu, H.: Multi-focus image fusion with a natural enhancement via joint multi-level deeply supervised convolutional neural network. IEEE Trans. Circuits Syst. Video Technol. 29(4), 1102\u20131115 (2018)","journal-title":"IEEE Trans. 
Circuits Syst. Video Technol."},{"issue":"16","key":"9_CR52","doi-asserted-by":"publisher","first-page":"22335","DOI":"10.1007\/s11042-021-11659-5","volume":"81","author":"Y Zhou","year":"2022","unstructured":"Zhou, Y., Liu, K., Dou, Q., Liu, Z., Jeon, G., Yang, X.: LNMF: lightweight network for multi-focus image fusion. Multimed. Tools Appl. 81(16), 22335\u201322353 (2022)","journal-title":"Multimed. Tools Appl."},{"issue":"3","key":"9_CR53","doi-asserted-by":"publisher","first-page":"733","DOI":"10.1049\/ipr2.12668","volume":"17","author":"X Jin","year":"2023","unstructured":"Jin, X., Xi, X., Zhou, D., Ren, X., Yang, J., Jiang, Q.: An unsupervised multi-focus image fusion method based on transformer and U-Net. IET Image Proc. 17(3), 733\u2013746 (2023)","journal-title":"IET Image Proc."}],"container-title":["Lecture Notes in Computer Science","Neural Information Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-96-6594-5_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,28]],"date-time":"2026-03-28T07:47:26Z","timestamp":1774684046000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-96-6594-5_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,7,22]]},"ISBN":["9789819665938","9789819665945"],"references-count":53,"URL":"https:\/\/doi.org\/10.1007\/978-981-96-6594-5_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,7,22]]},"assertion":[{"value":"22 July 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICONIP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Neural Information Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Auckland","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"New Zealand","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2 December 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"6 December 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iconip2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/iconip2024.org","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
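The object above is the "message" payload of a Crossref "work" record for the chapter DOI 10.1007/978-981-96-6594-5_9. For anyone who wants to re-fetch or post-process such a record programmatically, the following is a minimal sketch, assuming network access, the Python "requests" package, and the public Crossref REST API endpoint https://api.crossref.org/works/<DOI>; the field names used (title, author, container-title, published, DOI, references-count) are taken directly from the record shown above, and this is an illustrative snippet rather than an official client.

import requests

DOI = "10.1007/978-981-96-6594-5_9"

# Fetch the Crossref work record for this DOI; the JSON body wraps the
# bibliographic data in a "message" object, exactly as reproduced above.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
msg = resp.json()["message"]

# Pull out a few human-readable fields from the record.
title = msg["title"][0]
authors = ", ".join(
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in msg.get("author", [])
)
container = " / ".join(msg.get("container-title", []))
year = msg["published"]["date-parts"][0][0]

print(title)
print("  Authors   :", authors)
print("  Published :", f"{container}, {year}")
print("  DOI       :", f"https://doi.org/{msg['DOI']}")
print("  References:", msg["references-count"])

Run against this DOI, the sketch should print the chapter title ("XFusion: Cross-Attention Transformer for Multi-focus Image Fusion"), the five authors, the Lecture Notes in Computer Science / Neural Information Processing container with year 2025, and the reference count of 53, matching the fields in the record above.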