{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,4]],"date-time":"2026-04-04T11:31:01Z","timestamp":1775302261173,"version":"3.50.1"},"reference-count":84,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["12371512"],"award-info":[{"award-number":["12371512"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2025,5]]},"DOI":"10.1007\/s11263-024-02256-8","type":"journal-article","created":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T03:44:51Z","timestamp":1733111091000},"page":"2547-2567","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":33,"title":["ReFusion: Learning Image Fusion from Reconstruction with Learnable Loss Via 
Meta-Learning"],"prefix":"10.1007","volume":"133","author":[{"given":"Haowen","family":"Bai","sequence":"first","affiliation":[]},{"given":"Zixiang","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Jiangshe","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yichen","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Lilun","family":"Deng","sequence":"additional","affiliation":[]},{"given":"Yukun","family":"Cui","sequence":"additional","affiliation":[]},{"given":"Baisong","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Shuang","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,2]]},"reference":[{"key":"2256_CR1","unstructured":"Antoniou, A., & Storkey, A. J. (2019). Learning to learn by self-critique. In Proceedings of the Advances in Neural Information Processing Systems (NeurIPS) (pp. 9936\u20139946)"},{"key":"2256_CR2","doi-asserted-by":"crossref","unstructured":"Baik, S., Choi, J., Kim, H., et\u00a0al. (2021). Meta-learning with task-adaptive loss function for few-shot learning. In Proceedings of the IEEE International Conference on Computer Vision (ICCV) (pp. 9445\u20139454). IEEE","DOI":"10.1109\/ICCV48922.2021.00933"},{"key":"2256_CR3","doi-asserted-by":"crossref","unstructured":"Bandara, W. G. C., & Patel, V. M. (2022). Hypertransformer: A textural and spectral feature fusion transformer for pansharpening. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 1757\u20131767)","DOI":"10.1109\/CVPR52688.2022.00181"},{"issue":"1","key":"2256_CR4","doi-asserted-by":"crossref","first-page":"12","DOI":"10.1016\/j.biosystemseng.2009.02.009","volume":"103","author":"D Bulanon","year":"2009","unstructured":"Bulanon, D., Burks, T., & Alchanatis, V. (2009). Image fusion of visible and thermal images for fruit detection. 
Biosystems Engineering, 103(1), 12\u201322.","journal-title":"Biosystems Engineering"},{"issue":"4","key":"2256_CR5","doi-asserted-by":"crossref","first-page":"2049","DOI":"10.1109\/TIP.2018.2794218","volume":"27","author":"J Cai","year":"2018","unstructured":"Cai, J., Gu, S., & Zhang, L. (2018). Learning a deep single image contrast enhancer from multi-exposure images. IEEE Transactions on Image Processing, 27(4), 2049\u20132062.","journal-title":"IEEE Transactions on Image Processing"},{"issue":"10","key":"2256_CR6","doi-asserted-by":"crossref","first-page":"3333","DOI":"10.1109\/TPAMI.2020.2984244","volume":"43","author":"X Deng","year":"2020","unstructured":"Deng, X., & Dragotti, P. L. (2020). Deep convolutional neural network for multi-modal image restoration and fusion. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(10), 3333\u20133348.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2256_CR7","unstructured":"Finn, C., Abbeel, P., Levine, S. (2017). Model-agnostic meta-learning for fast adaptation of deep networks. In Proceedings of the International Conference on Machine Learning (ICML) (pp. 1126\u20131135)"},{"key":"2256_CR8","unstructured":"Finn, C., Rajeswaran, A., Kakade, S., et\u00a0al. (2019). Online meta-learning. In Proceedings of the International conference on machine learning (ICML) (pp. 1920\u20131930)."},{"issue":"9","key":"2256_CR9","first-page":"5149","volume":"44","author":"TM Hospedales","year":"2022","unstructured":"Hospedales, T. M., Antoniou, A., Micaelli, P., et al. (2022). Meta-learning in neural networks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(9), 5149\u20135169.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2256_CR10","unstructured":"Houthooft, R., Chen, Y., Isola, P., et\u00a0al. (2018). Evolved policy gradients. 
In Proceedings of the Advances in Neural Information Processing Systems (NeurIPS) (pp. 5405\u20135414)."},{"key":"2256_CR11","doi-asserted-by":"crossref","first-page":"127","DOI":"10.1016\/j.inffus.2022.11.014","volume":"92","author":"X Hu","year":"2023","unstructured":"Hu, X., Jiang, J., Liu, X., et al. (2023). Zmff: Zero-shot multi-focus image fusion. Information Fusion, 92, 127\u2013138.","journal-title":"Information Fusion"},{"key":"2256_CR12","doi-asserted-by":"crossref","unstructured":"Hu, X., Sun, F., Sun, J., et\u00a0al. (2024). Cross-modal fusion and progressive decoding network for RGB-D salient object detection. International Journal of Computer Vision, 1\u201319.","DOI":"10.1007\/s11263-024-02020-y"},{"key":"2256_CR13","doi-asserted-by":"crossref","unstructured":"Huang, Z., Liu, J., Fan, X., et\u00a0al. (2022). Reconet: Recurrent correction network for fast and efficient multi-modality image fusion. In Proceedings of the European Conference on Computer Vision (ECCV) (pp. 539\u2013555). Springer","DOI":"10.1007\/978-3-031-19797-0_31"},{"key":"2256_CR14","doi-asserted-by":"crossref","first-page":"4","DOI":"10.1016\/j.inffus.2013.12.002","volume":"19","author":"AP James","year":"2014","unstructured":"James, A. P., & Dasarathy, B. V. (2014). Medical image fusion: A survey of the state of the art. Information Fusion, 19, 4\u201319.","journal-title":"Information Fusion"},{"key":"2256_CR15","doi-asserted-by":"crossref","unstructured":"Jiang, T., Wang, C., Li, X., et\u00a0al. (2023). Meflut: Unsupervised 1d lookup tables for multi-exposure image fusion. In Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) (pp. 10542\u201310551).","DOI":"10.1109\/ICCV51070.2023.00967"},{"key":"2256_CR16","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1016\/j.infrared.2017.10.004","volume":"88","author":"X Jin","year":"2018","unstructured":"Jin, X., Jiang, Q., Yao, S., et al. (2018). 
Infrared and visual image fusion method based on discrete cosine transform and local spatial frequency in discrete stationary wavelet transform domain. Infrared Physics & Technology, 88, 1\u201312.","journal-title":"Infrared Physics & Technology"},{"key":"2256_CR17","doi-asserted-by":"crossref","first-page":"3845","DOI":"10.1109\/TIP.2020.2966075","volume":"29","author":"H Jung","year":"2020","unstructured":"Jung, H., Kim, Y., Jang, H., et al. (2020). Unsupervised deep image fusion with structure tensor representations. IEEE Transactions on Image Processing, 29, 3845\u20133858.","journal-title":"IEEE Transactions on Image Processing"},{"issue":"5","key":"2256_CR18","doi-asserted-by":"crossref","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","volume":"28","author":"H Li","year":"2018","unstructured":"Li, H., & Wu, X. J. (2018). Densefuse: A fusion approach to infrared and visible images. IEEE Transactions on Image Processing, 28(5), 2614\u20132623.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2256_CR19","doi-asserted-by":"crossref","first-page":"4070","DOI":"10.1109\/TIP.2021.3069339","volume":"30","author":"H Li","year":"2021","unstructured":"Li, H., Cen, Y., Liu, Y., et al. (2021). Different input resolutions and arbitrary output resolution: A meta learning-based deep framework for infrared and visible image fusion. IEEE Transactions on Image Processing, 30, 4070\u20134083.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2256_CR20","doi-asserted-by":"crossref","first-page":"72","DOI":"10.1016\/j.inffus.2021.02.023","volume":"73","author":"H Li","year":"2021","unstructured":"Li, H., Wu, X. J., & Kittler, J. (2021). Rfn-nest: An end-to-end residual fusion network for infrared and visible images. Information Fusion, 73, 72\u201386.","journal-title":"Information Fusion"},{"key":"2256_CR21","unstructured":"Li, H., Liu, J., Zhang, Y., et\u00a0al. (2023a). 
A deep learning framework for infrared and visible image fusion without strict registration. International Journal of Computer Vision, 1\u201320"},{"issue":"9","key":"2256_CR22","doi-asserted-by":"crossref","first-page":"11040","DOI":"10.1109\/TPAMI.2023.3268209","volume":"45","author":"H Li","year":"2023","unstructured":"Li, H., Xu, T., Wu, X. J., et al. (2023). Lrrnet: A novel representation learning guided fusion network for infrared and visible images. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(9), 11040\u201311052.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2256_CR23","unstructured":"Li, J., Liu, J., Zhou, S., et\u00a0al. (2023c). Gesenet: A general semantic-guided network with couple mask ensemble for medical image fusion. IEEE Transactions on Neural Networks and Learning Systems, 1\u201314"},{"key":"2256_CR24","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1016\/j.inffus.2019.12.014","volume":"58","author":"Y Li","year":"2020","unstructured":"Li, Y., Zhao, H., Hu, Z., et al. (2020). Ivfusenet: Fusion of infrared and visible light images for depth prediction. Information Fusion, 58, 1\u201312.","journal-title":"Information Fusion"},{"key":"2256_CR25","unstructured":"Li, Z., Zhou, F., Chen, F., et\u00a0al. (2017). Meta-sgd: Learning to learn quickly for few-shot learning. arXiv preprint arXiv:1707.09835"},{"key":"2256_CR26","doi-asserted-by":"crossref","unstructured":"Liang, P., Jiang, J., Liu, X., et\u00a0al. (2022). Fusion from decomposition: A self-supervised decomposition approach for image fusion. In Proceedings of the European conference on computer vision (ECCV).","DOI":"10.1007\/978-3-031-19797-0_41"},{"key":"2256_CR27","doi-asserted-by":"crossref","first-page":"60","DOI":"10.1016\/j.media.2017.07.005","volume":"42","author":"G Litjens","year":"2017","unstructured":"Litjens, G., Kooi, T., Bejnordi, B. E., et al. (2017). A survey on deep learning in medical image analysis. 
Medical Image Analysis, 42, 60\u201388.","journal-title":"Medical Image Analysis"},{"key":"2256_CR28","unstructured":"Liu, H., Simonyan, K., Yang, Y. (2018). Darts: Differentiable architecture search. arXiv preprint arXiv:1806.09055"},{"key":"2256_CR29","doi-asserted-by":"crossref","first-page":"1153","DOI":"10.1007\/s11263-020-01418-8","volume":"129","author":"J Liu","year":"2021","unstructured":"Liu, J., Xu, D., Yang, W., et al. (2021). Benchmarking low-light image enhancement and beyond. International Journal of Computer Vision, 129, 1153\u20131184.","journal-title":"International Journal of Computer Vision"},{"key":"2256_CR30","doi-asserted-by":"crossref","unstructured":"Liu, J., Fan, X., Huang, Z., et\u00a0al. (2022a). Target-aware dual adversarial learning and a multi-scenario multi-modality benchmark to fuse infrared and visible for object detection. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 5792\u20135801).","DOI":"10.1109\/CVPR52688.2022.00571"},{"issue":"8","key":"2256_CR31","doi-asserted-by":"crossref","first-page":"5026","DOI":"10.1109\/TCSVT.2022.3144455","volume":"32","author":"J Liu","year":"2022","unstructured":"Liu, J., Shang, J., Liu, R., et al. (2022). Attention-guided global-local adversarial learning for detail-preserving multi-exposure image fusion. IEEE Transactions on Circuits and Systems for Video Technology, 32(8), 5026\u20135040.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2256_CR32","doi-asserted-by":"crossref","unstructured":"Liu, J., Liu, Z., Wu, G., et\u00a0al. (2023a). Multi-interactive feature learning and a full-time multi-modality benchmark for image fusion and segmentation. In Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) (pp. 
8115\u20138124).","DOI":"10.1109\/ICCV51070.2023.00745"},{"key":"2256_CR33","doi-asserted-by":"crossref","first-page":"237","DOI":"10.1016\/j.inffus.2023.02.027","volume":"95","author":"J Liu","year":"2023","unstructured":"Liu, J., Wu, G., Luan, J., et al. (2023). Holoco: Holistic and local contrastive learning network for multi-exposure image fusion. Information Fusion, 95, 237\u2013249.","journal-title":"Information Fusion"},{"issue":"5","key":"2256_CR34","doi-asserted-by":"crossref","first-page":"1748","DOI":"10.1007\/s11263-023-01952-1","volume":"132","author":"J Liu","year":"2024","unstructured":"Liu, J., Lin, R., Wu, G., et al. (2024). Coconet: Coupled contrastive learning network with multi-level feature ensemble for multi-modality image fusion. International Journal of Computer Vision, 132(5), 1748\u20131775.","journal-title":"International Journal of Computer Vision"},{"key":"2256_CR35","doi-asserted-by":"crossref","unstructured":"Liu, R., Liu, Z., Liu, J., et\u00a0al. (2021b). Searching a hierarchically aggregated fusion architecture for fast multi-modality image fusion. In Proceedings of the ACM International Conference on Multimedia (ACM MM) (pp. 1600\u20131608). ACM","DOI":"10.1145\/3474085.3475299"},{"key":"2256_CR36","doi-asserted-by":"crossref","first-page":"131","DOI":"10.1016\/j.neucom.2017.01.006","volume":"235","author":"X Liu","year":"2017","unstructured":"Liu, X., Mei, W., & Du, H. (2017). Structure tensor and nonsubsampled shearlet transform based algorithm for CT and MRI image fusion. Neurocomputing, 235, 131\u2013139.","journal-title":"Neurocomputing"},{"key":"2256_CR37","doi-asserted-by":"crossref","first-page":"9","DOI":"10.1016\/j.sigpro.2013.10.010","volume":"97","author":"Y Liu","year":"2014","unstructured":"Liu, Y., Jin, J., Wang, Q., et al. (2014). Region level based multi-focus image fusion using quaternion wavelet and normalized cut. 
Signal Process, 97, 9\u201330.","journal-title":"Signal Process"},{"key":"2256_CR38","doi-asserted-by":"crossref","first-page":"71","DOI":"10.1016\/j.inffus.2020.06.013","volume":"64","author":"Y Liu","year":"2020","unstructured":"Liu, Y., Wang, L., Cheng, J., et al. (2020). Multi-focus image fusion: A survey of the state of the art. Information Fusion, 64, 71\u201391.","journal-title":"Information Fusion"},{"key":"2256_CR39","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1016\/j.inffus.2022.06.001","volume":"86","author":"Y Liu","year":"2022","unstructured":"Liu, Y., Wang, L., Li, H., et al. (2022). Multi-focus image fusion with deep residual learning and focus property detection. Information Fusion, 86, 1\u201316.","journal-title":"Information Fusion"},{"key":"2256_CR40","doi-asserted-by":"crossref","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","volume":"45","author":"J Ma","year":"2019","unstructured":"Ma, J., Ma, Y., & Li, C. (2019). Infrared and visible image fusion methods and applications: A survey. Information Fusion, 45, 153\u2013178.","journal-title":"Information Fusion"},{"key":"2256_CR41","doi-asserted-by":"crossref","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","volume":"48","author":"J Ma","year":"2019","unstructured":"Ma, J., Yu, W., Liang, P., et al. (2019). Fusiongan: A generative adversarial network for infrared and visible image fusion. Information Fusion, 48, 11\u201326.","journal-title":"Information Fusion"},{"key":"2256_CR42","doi-asserted-by":"crossref","first-page":"4980","DOI":"10.1109\/TIP.2020.2977573","volume":"29","author":"J Ma","year":"2020","unstructured":"Ma, J., Xu, H., Jiang, J., et al. (2020). Ddcgan: A dual-discriminator conditional generative adversarial network for multi-resolution image fusion. 
IEEE Transactions on Image Processing, 29, 4980\u20134995.","journal-title":"IEEE Transactions on Image Processing"},{"issue":"5","key":"2256_CR43","doi-asserted-by":"crossref","first-page":"2519","DOI":"10.1109\/TIP.2017.2671921","volume":"26","author":"K Ma","year":"2017","unstructured":"Ma, K., Li, H., Yong, H., et al. (2017). Robust multi-exposure image fusion: A structural patch decomposition approach. IEEE Transactions on Image Processing, 26(5), 2519\u20132532.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2256_CR44","doi-asserted-by":"crossref","first-page":"2808","DOI":"10.1109\/TIP.2019.2952716","volume":"29","author":"K Ma","year":"2019","unstructured":"Ma, K., Duanmu, Z., Zhu, H., et al. (2019). Deep guided learning for fast multi-exposure image fusion. IEEE Transactions on Image Processing, 29, 2808\u20132819.","journal-title":"IEEE Transactions on Image Processing"},{"issue":"10","key":"2256_CR45","doi-asserted-by":"crossref","first-page":"1360","DOI":"10.1109\/TCSVT.2007.903776","volume":"17","author":"V Maik","year":"2007","unstructured":"Maik, V., Cho, D., Shin, J., et al. (2007). Regularized restoration using image fusion for digital auto-focusing. IEEE Transactions on Circuits and Systems for Video Technology, 17(10), 1360\u20131369.","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"2256_CR46","doi-asserted-by":"crossref","first-page":"72","DOI":"10.1016\/j.inffus.2014.10.004","volume":"25","author":"M Nejati","year":"2015","unstructured":"Nejati, M., Samavi, S., & Shirani, S. (2015). Multi-focus image fusion using dictionary-based sparse representation. Information Fusion, 25, 72\u201384.","journal-title":"Information Fusion"},{"key":"2256_CR47","unstructured":"Nichol, A., Achiam, J., Schulman, J. (2018). On first-order meta-learning algorithms. arXiv preprint arXiv:1803.02999"},{"key":"2256_CR48","doi-asserted-by":"crossref","unstructured":"Qin, X., Quan, Y., Pang, T., et\u00a0al. 
(2023). Ground-truth free meta-learning for deep compressive sampling. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 9947\u20139956).","DOI":"10.1109\/CVPR52729.2023.00959"},{"key":"2256_CR49","unstructured":"Ren, M., Zeng, W., Yang, B., et\u00a0al. (2018). Learning to reweight examples for robust deep learning. In Proceedings of the International Conference on Machine Learning (ICML) (pp 4334\u20134343)."},{"key":"2256_CR50","unstructured":"Shu, J., Xie, Q., Yi, L., et\u00a0al. (2019). Meta-weight-net: Learning an explicit mapping for sample weighting. In Proceedings of the Advances in Neural Information Processing Systems (NeurIPS) (Vol. 32)"},{"issue":"12","key":"2256_CR51","doi-asserted-by":"crossref","first-page":"2121","DOI":"10.1109\/JAS.2022.106082","volume":"9","author":"L Tang","year":"2022","unstructured":"Tang, L., Deng, Y., Ma, Y., et al. (2022). Superfusion: A versatile image registration and fusion network with semantic awareness. IEEE\/CAA Journal of Automatica Sinica, 9(12), 2121\u20132137.","journal-title":"IEEE\/CAA Journal of Automatica Sinica"},{"key":"2256_CR52","doi-asserted-by":"crossref","first-page":"28","DOI":"10.1016\/j.inffus.2021.12.004","volume":"82","author":"L Tang","year":"2022","unstructured":"Tang, L., Yuan, J., & Ma, J. (2022). Image fusion in the loop of high-level vision tasks: A semantic-aware real-time infrared and visible image fusion network. Information Fusion, 82, 28\u201342.","journal-title":"Information Fusion"},{"key":"2256_CR53","doi-asserted-by":"crossref","first-page":"79","DOI":"10.1016\/j.inffus.2022.03.007","volume":"83\u201384","author":"L Tang","year":"2022","unstructured":"Tang, L., Yuan, J., Zhang, H., et al. (2022). Piafusion: A progressive infrared and visible image fusion network based on illumination aware. 
Information Fusion, 83\u201384, 79\u201392.","journal-title":"Information Fusion"},{"key":"2256_CR54","volume":"99","author":"L Tang","year":"2023","unstructured":"Tang, L., Zhang, H., Xu, H., et al. (2023). Rethinking the necessity of image fusion in high-level vision tasks: A practical infrared and visible image fusion network based on progressive semantic injection and scene fidelity. Information Fusion, 99, 101870.","journal-title":"Information Fusion"},{"key":"2256_CR55","doi-asserted-by":"crossref","first-page":"5134","DOI":"10.1109\/TIP.2022.3193288","volume":"31","author":"W Tang","year":"2022","unstructured":"Tang, W., He, F., Liu, Y., et al. (2022). Matr: Multimodal medical image fusion via multiscale adaptive transformer. IEEE Transactions on Image Processing, 31, 5134\u20135149.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2256_CR56","doi-asserted-by":"crossref","first-page":"17573","DOI":"10.1007\/s11042-018-7124-9","volume":"78","author":"E Vakaimalar","year":"2019","unstructured":"Vakaimalar, E., & Mala, K. (2019). Multifocus image fusion scheme based on discrete cosine transform and spatial frequency. Multimedia Tools and Applications, 78, 17573\u201317587.","journal-title":"Multimedia Tools and Applications"},{"issue":"4","key":"2256_CR57","doi-asserted-by":"crossref","first-page":"1029","DOI":"10.1007\/s11263-023-01924-5","volume":"132","author":"W Wang","year":"2024","unstructured":"Wang, W., Deng, L. J., Ran, R., et al. (2024). A general paradigm with detail-preserving conditional invertible network for image fusion. International Journal of Computer Vision, 132(4), 1029\u20131054.","journal-title":"International Journal of Computer Vision"},{"key":"2256_CR58","doi-asserted-by":"crossref","first-page":"4527","DOI":"10.1109\/TIP.2022.3184250","volume":"31","author":"Z Wang","year":"2022","unstructured":"Wang, Z., Li, X., Duan, H., et al. (2022). A self-supervised residual feature learning model for multifocus image fusion. 
IEEE Transactions on Image Processing, 31, 4527\u20134542.","journal-title":"IEEE Transactions on Image Processing"},{"key":"2256_CR59","doi-asserted-by":"crossref","unstructured":"Wang, Z., Li, X., Zhao, L., et\u00a0al. (2023). When multi-focus image fusion networks meet traditional edge-preservation technology. International Journal of Computer Vision, 1\u201324","DOI":"10.1007\/s11263-023-01806-w"},{"key":"2256_CR60","doi-asserted-by":"crossref","first-page":"944","DOI":"10.1109\/TMM.2023.3273924","volume":"26","author":"J Wen","year":"2023","unstructured":"Wen, J., Qin, F., Du, J., et al. (2023). Msgfusion: Medical semantic guided two-branch network for multimodal brain image fusion. IEEE Transactions on Multimedia, 26, 944\u2013957.","journal-title":"IEEE Transactions on Multimedia"},{"key":"2256_CR61","doi-asserted-by":"crossref","first-page":"177","DOI":"10.1016\/j.inffus.2021.06.001","volume":"76","author":"H Xu","year":"2021","unstructured":"Xu, H., & Ma, J. (2021). Emfusion: An unsupervised enhanced medical image fusion network. Information Fusion, 76, 177\u2013186.","journal-title":"Information Fusion"},{"key":"2256_CR62","doi-asserted-by":"crossref","unstructured":"Xu, H., Ma, J., Le, Z., et\u00a0al. (2020). Fusiondn: A unified densely connected network for image fusion. In Proceedings of the AAAI Conference on Artificial Intelligence (AAAI) (pp. 12484\u201312491).","DOI":"10.1609\/aaai.v34i07.6936"},{"issue":"1","key":"2256_CR63","doi-asserted-by":"crossref","first-page":"502","DOI":"10.1109\/TPAMI.2020.3012548","volume":"44","author":"H Xu","year":"2022","unstructured":"Xu, H., Ma, J., Jiang, J., et al. (2022). U2fusion: A unified unsupervised image fusion network. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(1), 502\u2013518.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2256_CR64","doi-asserted-by":"crossref","unstructured":"Xu, H., Ma, J., Yuan, J., et\u00a0al. (2022b). 
Rfnet: Unsupervised network for mutually reinforcing multi-modal image registration and fusion. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 19679\u201319688).","DOI":"10.1109\/CVPR52688.2022.01906"},{"key":"2256_CR65","doi-asserted-by":"crossref","unstructured":"Xu, H., Yuan, J., & Ma, J. (2023). Murf: Mutually reinforcing multi-modal image registration and fusion. IEEE Transactions on Pattern Analysis and Machine Intelligence","DOI":"10.1109\/TPAMI.2023.3283682"},{"key":"2256_CR66","doi-asserted-by":"crossref","unstructured":"Xu, H., Zhang, H., Yi, X., et\u00a0al. (2024). Cretinex: A progressive color-shift aware retinex model for low-light image enhancement. International Journal of Computer Vision, 1\u201323","DOI":"10.1007\/s11263-024-02065-z"},{"key":"2256_CR67","doi-asserted-by":"crossref","unstructured":"Xu, S., Zhang, J., Zhao, Z., et\u00a0al. (2021). Deep gradient projection networks for pan-sharpening. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 1366\u20131375)","DOI":"10.1109\/CVPR46437.2021.00142"},{"key":"2256_CR68","doi-asserted-by":"crossref","unstructured":"Yan, Q., Gong, D., Shi, J. Q., et\u00a0al. (2022). Dual-attention-guided network for ghost-free high dynamic range imaging. International Journal of Computer Vision, 1\u201319.","DOI":"10.1007\/s11263-021-01535-y"},{"issue":"22","key":"2256_CR69","doi-asserted-by":"crossref","first-page":"6647","DOI":"10.3390\/s20226647","volume":"20","author":"X Yan","year":"2020","unstructured":"Yan, X., Gilani, S. Z., Qin, H., et al. (2020). Structural similarity loss for learning to fuse multi-focus images. Sensors, 20(22), 6647.","journal-title":"Sensors"},{"key":"2256_CR70","doi-asserted-by":"crossref","unstructured":"Zamir, S. W., Arora, A., Khan, S., et\u00a0al. (2022). Restormer: Efficient transformer for high-resolution image restoration. 
In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (pp. 5728\u20135739).","DOI":"10.1109\/CVPR52688.2022.00564"},{"issue":"10","key":"2256_CR71","doi-asserted-by":"crossref","first-page":"2761","DOI":"10.1007\/s11263-021-01501-8","volume":"129","author":"H Zhang","year":"2021","unstructured":"Zhang, H., & Ma, J. (2021). Sdnet: A versatile squeeze-and-decomposition network for real-time image fusion. International Journal of Computer Vision, 129(10), 2761\u20132785.","journal-title":"International Journal of Computer Vision"},{"key":"2256_CR72","doi-asserted-by":"crossref","first-page":"370","DOI":"10.1016\/j.patrec.2020.08.002","volume":"138","author":"J Zhang","year":"2020","unstructured":"Zhang, J., Liao, Q., Liu, S., et al. (2020). Real-mff: A large realistic multi-focus image dataset with ground truth. Pattern Recognition Letters, 138, 370\u2013377.","journal-title":"Pattern Recognition Letters"},{"key":"2256_CR73","doi-asserted-by":"crossref","unstructured":"Zhang, Q., Yuan, Q., Song, M., et al. (2022). Cooperated spectral low-rankness prior and deep spatial prior for HSI unsupervised denoising. IEEE Transactions on Image Processing, 31, 6356\u20136368.","DOI":"10.1109\/TIP.2022.3211471"},{"key":"2256_CR74","unstructured":"Zhang, Q., Zheng, Y., Yuan, Q., et\u00a0al. (2023). Hyperspectral image denoising: From model-driven, data-driven, to model-data-driven. IEEE Transactions on Neural Networks and Learning Systems, 1\u201321."},{"key":"2256_CR75","doi-asserted-by":"crossref","first-page":"111","DOI":"10.1016\/j.inffus.2021.02.005","volume":"74","author":"X Zhang","year":"2021","unstructured":"Zhang, X. (2021). Benchmarking and comparing multi-exposure image fusion algorithms. Information Fusion, 74, 111\u2013131.","journal-title":"Information Fusion"},{"issue":"9","key":"2256_CR76","first-page":"4819","volume":"44","author":"X Zhang","year":"2021","unstructured":"Zhang, X. (2021). 
Deep learning-based multi-focus image fusion: A survey and a comparative study. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(9), 4819\u20134838.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2256_CR77","doi-asserted-by":"crossref","first-page":"1013","DOI":"10.1007\/s11263-020-01407-x","volume":"129","author":"Y Zhang","year":"2021","unstructured":"Zhang, Y., Guo, X., Ma, J., et al. (2021). Beyond brightening low-light images. International Journal of Computer Vision, 129, 1013\u20131037.","journal-title":"International Journal of Computer Vision"},{"key":"2256_CR78","doi-asserted-by":"crossref","unstructured":"Zhao, F., Zhao, W., & Lu, H. (2023a). Interactive feature embedding for infrared and visible image fusion. IEEE Transactions on Neural Networks and Learning Systems","DOI":"10.1109\/TNNLS.2023.3264911"},{"key":"2256_CR79","doi-asserted-by":"crossref","unstructured":"Zhao, W., Xie, S., Zhao, F., et\u00a0al. (2023b). Metafusion: Infrared and visible image fusion via meta-feature embedding from object detection. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (pp. 13955\u201313965).","DOI":"10.1109\/CVPR52729.2023.01341"},{"key":"2256_CR80","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Xu, S., Zhang, C., et\u00a0al. (2021). Didfuse: deep image decomposition for infrared and visible image fusion. In Proceedings of the Twenty-Ninth International Conference on International Joint Conferences on Artificial Intelligence (pp. 976\u2013976).","DOI":"10.24963\/ijcai.2020\/135"},{"key":"2256_CR81","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Bai, H., Zhang, J., et\u00a0al. (2023c). Cddfuse: Correlation-driven dual-branch feature decomposition for multi-modality image fusion. In Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (pp. 
5906\u20135916).","DOI":"10.1109\/CVPR52729.2023.00572"},{"key":"2256_CR82","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Bai, H., Zhu, Y., et\u00a0al. (2023d). Ddfm: denoising diffusion model for multi-modality image fusion. In Proceedings of the IEEE\/CVF International Conference on Computer Vision (pp. 8082\u20138093).","DOI":"10.1109\/ICCV51070.2023.00742"},{"key":"2256_CR83","doi-asserted-by":"crossref","first-page":"635","DOI":"10.1109\/TMM.2021.3129609","volume":"25","author":"H Zhou","year":"2021","unstructured":"Zhou, H., Wu, W., Zhang, Y., et al. (2021). Semantic-supervised infrared and visible image fusion via a dual-discriminator generative adversarial network. IEEE Transactions on Multimedia, 25, 635\u2013648.","journal-title":"IEEE Transactions on Multimedia"},{"key":"2256_CR84","doi-asserted-by":"crossref","unstructured":"Zhou, J., Liu, Q., Jiang, Q., et\u00a0al. (2023). Underwater camera: Improving visual perception via adaptive dark pixel prior and color correction. 
International Journal of Computer Vision, 1\u201319.","DOI":"10.1007\/s11263-023-01853-3"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-024-02256-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-024-02256-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-024-02256-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,17]],"date-time":"2025-04-17T06:00:25Z","timestamp":1744869625000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-024-02256-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,2]]},"references-count":84,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2025,5]]}},"alternative-id":["2256"],"URL":"https:\/\/doi.org\/10.1007\/s11263-024-02256-8","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12,2]]},"assertion":[{"value":"21 May 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 September 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 December 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of 
interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}