{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T08:56:29Z","timestamp":1770800189384,"version":"3.50.0"},"reference-count":73,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,12,4]],"date-time":"2025-12-04T00:00:00Z","timestamp":1764806400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,12,4]],"date-time":"2025-12-04T00:00:00Z","timestamp":1764806400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100004608","name":"Natural Science Foundation of Jiangsu Province","doi-asserted-by":"crossref","award":["BK20230440"],"award-info":[{"award-number":["BK20230440"]}],"id":[{"id":"10.13039\/501100004608","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1007\/s00530-025-02089-6","type":"journal-article","created":{"date-parts":[[2025,12,4]],"date-time":"2025-12-04T07:19:23Z","timestamp":1764832763000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["IMENet: infrared-guided multimodal enhancement network for low-light vision"],"prefix":"10.1007","volume":"32","author":[{"given":"Xiaobing","family":"Yu","sequence":"first","affiliation":[]},{"given":"Zhikai","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Huapeng","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Chenyang","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Zebin","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Tianming","family":"Zhan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,12,4]]},"reference":[{"key":"2089_CR1","doi-asserted-by":"publisher","first-page":"11 782","DOI":"10.1109\/ACCESS.2018.2797872","volume":"6","author":"Y Chang","year":"2018","unstructured":"Chang, Y., Jung, C., Ke, P., Song, H., Hwang, J.: Automatic contrast-limited adaptive histogram equalization with dual gamma correction. Ieee Access 6, 11 782-11 792 (2018)","journal-title":"Ieee Access"},{"key":"2089_CR2","doi-asserted-by":"crossref","unstructured":"Banik, P.P., Saha, R., Kim, K.-D.: Contrast enhancement of low-light image using histogram equalization and illumination adjustment, in 2018 intl. conf. electron. info. Commn. (ICEIC). IEEE 2018, 1\u20134 (2018)","DOI":"10.23919\/ELINFOCOM.2018.8330564"},{"key":"2089_CR3","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Zheng, H., Hong, R., Xu, M., Yan, S., Wang, M.: Deep color consistent network for low-light image enhancement. In: Proc. IEEE\/CVF conf. comput. vis. pattern recogn., pp. 1899\u20131908 (2022)","DOI":"10.36227\/techrxiv.17198216.v2"},{"key":"2089_CR4","unstructured":"Liu, R., Ma, L., Zhang, J., Fan, X., Luo, Z.: Retinex-inspired unrolling with cooperative prior architecture search for low-light image enhancement. Proc. IEEE\/CVF conf. comput. vis. pattern recogn, 10, 561\u201310 570 (2021)"},{"issue":"7","key":"2089_CR5","doi-asserted-by":"publisher","first-page":"965","DOI":"10.1109\/83.597272","volume":"6","author":"DJ Jobson","year":"1997","unstructured":"Jobson, D.J., Rahman, Z.-U., Woodell, G.A.: A multiscale retinex for bridging the gap between color images and the human observation of scenes. IEEE Trans. Image Process. 6(7), 965\u2013976 (1997)","journal-title":"IEEE Trans. Image Process."},{"key":"2089_CR6","doi-asserted-by":"publisher","first-page":"2072","DOI":"10.1109\/TIP.2021.3050850","volume":"30","author":"W Yang","year":"2021","unstructured":"Yang, W., Wang, W., Huang, H., Wang, S., Liu, J.: Sparse gradient regularized deep retinex network for robust low-light image enhancement. IEEE Trans. Image Process. 30, 2072\u20132086 (2021)","journal-title":"IEEE Trans. Image Process."},{"issue":"6","key":"2089_CR7","doi-asserted-by":"publisher","first-page":"2828","DOI":"10.1109\/TIP.2018.2810539","volume":"27","author":"M Li","year":"2018","unstructured":"Li, M., Liu, J., Yang, W., Sun, X., Guo, Z.: Structure-revealing low-light image enhancement via robust retinex model. IEEE Trans. Image Process. 27(6), 2828\u20132841 (2018)","journal-title":"IEEE Trans. Image Process."},{"key":"2089_CR8","doi-asserted-by":"publisher","first-page":"1213","DOI":"10.1109\/TMM.2023.3278385","volume":"26","author":"K Wu","year":"2023","unstructured":"Wu, K., Huang, J., Ma, Y., Fan, F., Ma, J.: Cycle-retinex: Unpaired low-light image enhancement via retinex-inline cyclegan. IEEE Trans. Multimedia 26, 1213\u20131228 (2023)","journal-title":"IEEE Trans. Multimedia"},{"key":"2089_CR9","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Zhang, J., Guo, X.: Kindling the darkness: A practical low-light image enhancer, in Proc. 27th ACM intl. conf. multimed., pp. 1632\u20131640 (2019)","DOI":"10.1145\/3343031.3350926"},{"key":"2089_CR10","doi-asserted-by":"publisher","first-page":"1013","DOI":"10.1007\/s11263-020-01407-x","volume":"129","author":"Y Zhang","year":"2021","unstructured":"Zhang, Y., Guo, X., Ma, J., Liu, W., Zhang, J.: Beyond brightening low-light images. Int. J. Comput. Vision 129, 1013\u20131037 (2021)","journal-title":"Int. J. Comput. Vision"},{"key":"2089_CR11","doi-asserted-by":"crossref","unstructured":"Wang, C., Jin, Z.: Brighten-and-colorize: A decoupled network for customized low-light image enhancement, in Proc. 31st ACM Intl. Conf. Multimed., pp. 8356\u20138366 (2023)","DOI":"10.1145\/3581783.3611907"},{"key":"2089_CR12","doi-asserted-by":"crossref","unstructured":"Yan, Q., Feng, Y., Zhang, C., Wang, P., Wu, P., Dong, W., Sun, J., Zhang, Y.: You only need one color space: An efficient network for low-light image enhancement, arXiv:2402.05809 (2024)","DOI":"10.1109\/CVPR52734.2025.00533"},{"key":"2089_CR13","doi-asserted-by":"crossref","unstructured":"Hashmi, K. A., Kallempudi, G., Stricker, D., Afzal, M. Z.: Featenhancer: Enhancing hierarchical features for object detection and beyond under low-light vision, in Proc. IEEE\/CVF Intl. Conf. Comput. Vis., pp. 6725\u20136735 (2023)","DOI":"10.1109\/ICCV51070.2023.00619"},{"issue":"3","key":"2089_CR14","first-page":"2654","volume":"37","author":"T Wang","year":"2023","unstructured":"Wang, T., Zhang, K., Shen, T., Luo, W., Stenger, B., Lu, T.: Ultra-high-definition low-light image enhancement: A benchmark and transformer-based method. Proc. AAAI Conf. Artif. Intell. 37(3), 2654\u20132662 (2023)","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"2089_CR15","unstructured":"Xu, X., Wang, R., Fu, C.-W., Jia, J.: Snr-aware low-light image enhancement, in Proc. IEEE\/CVF conf. comput. vis. pattern. recogn., pp. 17 714\u201317 724 (2022)"},{"key":"2089_CR16","unstructured":"Cai, Y., Bian, H., Lin, J., Wang, H., Timofte, R., Zhang, Y.: Retinexformer: One-stage retinex-based transformer for low-light image enhancement, in Proc. IEEE\/CVF intl. conf. comput vis, pp. 12 504\u201312 513 (2023)"},{"key":"2089_CR17","doi-asserted-by":"crossref","unstructured":"Zamir, S. W., Arora, A., Khan, S., Hayat, M., Khan, F. S., Yang, M.-H.: Restormer: Efficient transformer for high-resolution image restoration, in Proc. IEEE\/CVF conf. comput. vis. pattern recogn., pp. 5728\u20135739 (2022)","DOI":"10.1109\/CVPR52688.2022.00564"},{"key":"2089_CR18","unstructured":"Cui, Z., Li, K., Gu, L., Su, S., Gao, P., Jiang, Z., Qiao, Y., Harada, T.: You only need 90k parameters to adapt light: a light weight transformer for image enhancement and exposure correction, arXiv:2205.14871 (2022)"},{"key":"2089_CR19","doi-asserted-by":"publisher","first-page":"26","DOI":"10.1016\/j.inffus.2023.02.011","volume":"95","author":"H Li","year":"2023","unstructured":"Li, H., Zhao, J., Li, J., Yu, Z., Lu, G.: Feature dynamic alignment and refinement for infrared-visible image fusion: Translation robust fusion. Information Fusion 95, 26\u201341 (2023)","journal-title":"Information Fusion"},{"key":"2089_CR20","doi-asserted-by":"crossref","unstructured":"Li, H., Wu, X.-J., Kittler, J.: Infrared and visible image fusion using a deep learning framework, in 2018 24th intl. conf. pattern recogn. (ICPR). IEEE 2018, 2705\u20132710 (2018)","DOI":"10.1109\/ICPR.2018.8546006"},{"issue":"18","key":"2089_CR21","doi-asserted-by":"publisher","first-page":"10336","DOI":"10.3390\/app131810336","volume":"13","author":"C Lei","year":"2023","unstructured":"Lei, C., Tian, Q.: Low-light image enhancement algorithm based on deep learning and retinex theory. Appl. Sci. 13(18), 10336 (2023)","journal-title":"Appl. Sci."},{"issue":"1","key":"2089_CR22","doi-asserted-by":"publisher","first-page":"2323","DOI":"10.1007\/s11042-023-15242-y","volume":"83","author":"X Li","year":"2024","unstructured":"Li, X., Li, Q., Anisetti, M., Jeon, G., Gao, M.: A structure and texture revealing retinex model for low-light image enhancement. Multimed. Tools Appl. 83(1), 2323\u20132347 (2024)","journal-title":"Multimed. Tools Appl."},{"key":"2089_CR23","doi-asserted-by":"crossref","unstructured":"Peng, S., Zhu, X., Deng, H., Lei, Z., Deng, L.-J.: Fusionmamba: Efficient image fusion with state space model, pp. arXiv\u20132404 (2024)","DOI":"10.1109\/TGRS.2024.3496073"},{"issue":"11","key":"2089_CR24","doi-asserted-by":"publisher","first-page":"7403","DOI":"10.1109\/TCSVT.2022.3186880","volume":"32","author":"G-D Fan","year":"2022","unstructured":"Fan, G.-D., Fan, B., Gan, M., Chen, G.-Y., Chen, C.P.: Multiscale low-light image enhancement network with illumination constraint. IEEE Trans. Circuits Syst. Video Technol. 32(11), 7403\u20137417 (2022)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"2089_CR25","doi-asserted-by":"crossref","unstructured":"Ma, T., Ma, L., Li, Z., Wang, Y., Liu, J., Xu, C., Liu, R.: Rethinking reconstruction and denoising in the dark: New perspective, general architecture and beyond, in Proc. Comput. Vis. Pattern Recogn. Conf., pp. 2323\u20132332 (2025)","DOI":"10.1109\/CVPR52734.2025.00222"},{"key":"2089_CR26","doi-asserted-by":"crossref","unstructured":"Wang, R., Zhang, Q., Fu, C.-W., Shen, X., Zheng, W.-S., Jia, J.: Underexposed photo enhancement using deep illumination estimation, in Proc. IEEE\/CVF conf. comput. vis. pattern recogn., pp. 6849\u20136857 (2019)","DOI":"10.1109\/CVPR.2019.00701"},{"key":"2089_CR27","doi-asserted-by":"crossref","unstructured":"Guo, C., Li, C., Guo, J., Loy, C. C., Hou, J., Kwong, S., Cong, R.: Zero-reference deep curve estimation for low-light image enhancement, in Proc. IEEE\/CVF conf. comput. vis. pattern recogn., pp. 1780\u20131789 (2020)","DOI":"10.1109\/CVPR42600.2020.00185"},{"issue":"8","key":"2089_CR28","first-page":"4225","volume":"44","author":"C Li","year":"2021","unstructured":"Li, C., Guo, C., Loy, C.C.: Learning to enhance low-light image via zero-reference deep curve estimation. IEEE Trans. Pattern Anal. Mach. Intell. 44(8), 4225\u20134238 (2021)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"2089_CR29","doi-asserted-by":"crossref","unstructured":"Ma, L., Ma, T., Xu, C., Liu, J., Fan, X., Luo, Z., Liu, R.: Learning with self-calibrator for fast and robust low-light image enhancement, IEEE Trans. Pattern Anal. Mach. Intell. (2025)","DOI":"10.1109\/TPAMI.2025.3586712"},{"issue":"3","key":"2089_CR30","first-page":"2834","volume":"39","author":"G Fan","year":"2025","unstructured":"Fan, G., Yao, Z., Chen, G.-Y., Su, J.-N., Gan, M.: Iniretinex: Rethinking retinex-type low-light image enhancer via initialization perspective. Proc. AAAI Conf. Artif. Intell. 39(3), 2834\u20132842 (2025)","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"2089_CR31","doi-asserted-by":"crossref","unstructured":"Feng, H., Wang, L., Wang, Y., Huang, H.: Learnability enhancement for low-light raw denoising: Where paired real data meets noise modeling, in Proc. 30th ACM Intl. Conf. Multimed., pp. 1436\u20131444 (2022)","DOI":"10.1145\/3503161.3548186"},{"key":"2089_CR32","doi-asserted-by":"crossref","unstructured":"Yao, Z., Fan, G., Fan, J., Gan, M., Chen, C. P.: Spatial-frequency dual-domain feature fusion network for low-light remote sensing image enhancement, IEEE Trans. Geoscience and Remote Sens. (2024)","DOI":"10.1109\/TGRS.2024.3434416"},{"key":"2089_CR33","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2024.104276","volume":"252","author":"G Fan","year":"2025","unstructured":"Fan, G., Yao, Z., Gan, M.: Illumination-aware and structure-guided transformer for low-light image enhancement. Comput. Vis. Image Underst. 252, 104276 (2025)","journal-title":"Comput. Vis. Image Underst."},{"key":"2089_CR34","first-page":"1","volume":"73","author":"Z Yao","year":"2024","unstructured":"Yao, Z., Su, J.-N., Fan, G., Gan, M., Chen, C.P.: Gaca: A gradient-aware and contrastive-adaptive learning framework for low-light image enhancement. IEEE Trans. Instrum. Meas. 73, 1\u201314 (2024)","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"2089_CR35","doi-asserted-by":"crossref","unstructured":"Wu, K., Huang, J., Ma, Y., Fan, F., Ma, J.: Mutually reinforcing learning of decoupled degradation and diffusion enhancement for unpaired low-light image lightening, IEEE Trans. Image Process., (2025)","DOI":"10.1109\/TIP.2025.3553070"},{"key":"2089_CR36","doi-asserted-by":"crossref","unstructured":"Wang, C., Wu, H., Jin, Z.: Fourllie: Boosting low-light image enhancement by fourier frequency information, in Proc. 31st ACM Intl. Conf. Multimed., pp. 7459\u20137469 (2023)","DOI":"10.1145\/3581783.3611909"},{"key":"2089_CR37","doi-asserted-by":"publisher","first-page":"4980","DOI":"10.1109\/TIP.2020.2977573","volume":"29","author":"J Ma","year":"2020","unstructured":"Ma, J., Xu, H., Jiang, J., Mei, X., Zhang, X.-P.: Ddcgan: A dual-discriminator conditional generative adversarial network for multi-resolution image fusion. IEEE Trans. Image Process. 29, 4980\u20134995 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"2089_CR38","doi-asserted-by":"publisher","first-page":"1383","DOI":"10.1109\/TMM.2020.2997127","volume":"23","author":"J Li","year":"2020","unstructured":"Li, J., Huo, H., Li, C., Wang, R., Feng, Q.: Attentionfgan: Infrared and visible image fusion using attention-based generative adversarial networks. IEEE Trans. Multimed. 23, 1383\u20131396 (2020)","journal-title":"IEEE Trans. Multimed."},{"issue":"5","key":"2089_CR39","doi-asserted-by":"publisher","first-page":"2614","DOI":"10.1109\/TIP.2018.2887342","volume":"28","author":"H Li","year":"2018","unstructured":"Li, H., Wu, X.-J.: Densefuse: A fusion approach to infrared and visible images. IEEE Trans. Image Process. 28(5), 2614\u20132623 (2018)","journal-title":"IEEE Trans. Image Process."},{"key":"2089_CR40","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1016\/j.inffus.2019.07.011","volume":"54","author":"Y Zhang","year":"2020","unstructured":"Zhang, Y., Liu, Y., Sun, P., Yan, H., Zhao, X., Zhang, L.: Ifcnn: A general image fusion framework based on convolutional neural network. Information Fusion 54, 99\u2013118 (2020)","journal-title":"Information Fusion"},{"issue":"1","key":"2089_CR41","doi-asserted-by":"publisher","first-page":"105","DOI":"10.1109\/TCSVT.2021.3056725","volume":"32","author":"J Liu","year":"2021","unstructured":"Liu, J., Fan, X., Jiang, J., Liu, R., Luo, Z.: Learning a deep multi-scale feature ensemble and an edge-attention guidance for image fusion. IEEE Trans. Circuits Syst. Video Technol. 32(1), 105\u2013119 (2021)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"2089_CR42","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2020.3022438","volume":"70","author":"L Jian","year":"2020","unstructured":"Jian, L., Yang, X., Liu, Z., Jeon, G., Gao, M., Chisholm, D.: Sedrfuse: A symmetric encoder-decoder with residual block network for infrared and visible image fusion. IEEE Trans. Instrum. Meas. 70, 1\u201315 (2020)","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"2089_CR43","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","volume":"48","author":"J Ma","year":"2019","unstructured":"Ma, J., Yu, W., Liang, P., Li, C., Jiang, J.: Fusiongan: A generative adversarial network for infrared and visible image fusion. Information fusion 48, 11\u201326 (2019)","journal-title":"Information fusion"},{"key":"2089_CR44","first-page":"1","volume":"71","author":"Z Huang","year":"2022","unstructured":"Huang, Z., Li, J., Hua, Z., Fan, L.: Underwater image enhancement via adaptive group attention-based multiscale cascade transformer. IEEE Trans. Instrum. Meas. 71, 1\u201318 (2022)","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"2089_CR45","first-page":"1","volume":"60","author":"X Su","year":"2022","unstructured":"Su, X., Li, J., Hua, Z.: Transformer-based regression network for pansharpening remote sensing images. IEEE Trans. Geosci. Remote Sens. 60, 1\u201323 (2022)","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"2089_CR46","first-page":"1","volume":"71","author":"J Li","year":"2022","unstructured":"Li, J., Zhu, J., Li, C., Chen, X., Yang, B.: Cgtf: Convolution-guided transformer for infrared and visible image fusion. IEEE Trans. Instrum. Meas. 71, 1\u201314 (2022)","journal-title":"IEEE Trans. Instrum. Meas."},{"issue":"7","key":"2089_CR47","doi-asserted-by":"publisher","first-page":"1200","DOI":"10.1109\/JAS.2022.105686","volume":"9","author":"J Ma","year":"2022","unstructured":"Ma, J., Tang, L., Fan, F., Huang, J., Mei, X., Ma, Y.: Swinfusion: Cross-domain long-range learning for general image fusion via swin transformer. IEEE\/CAA J. Automatica Sinica 9(7), 1200\u20131217 (2022)","journal-title":"IEEE\/CAA J. Automatica Sinica"},{"issue":"7","key":"2089_CR48","doi-asserted-by":"publisher","first-page":"3159","DOI":"10.1109\/TCSVT.2023.3234340","volume":"33","author":"W Tang","year":"2023","unstructured":"Tang, W., He, F., Liu, Y., Duan, Y., Si, T.: Datfuse: Infrared and visible image fusion via dual attention transformer. IEEE Trans. Circuits Syst. Video Technol. 33(7), 3159\u20133172 (2023)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"2089_CR49","doi-asserted-by":"crossref","unstructured":"Feng, C., Zhuo, S., Zhang, X., Shen, L., S\u00fcsstrunk, S.: Near-infrared guided color image dehazing, in 2013 IEEE intl. conf. image process. IEEE 2013, 2363\u20132367 (2013)","DOI":"10.1109\/ICIP.2013.6738487"},{"issue":"8","key":"2089_CR50","doi-asserted-by":"publisher","first-page":"587","DOI":"10.1049\/iet-ipr.2017.0192","volume":"11","author":"D-W Jang","year":"2017","unstructured":"Jang, D.-W., Park, R.-H.: Colour image dehazing using near-infrared fusion. IET Image Proc. 11(8), 587\u2013594 (2017)","journal-title":"IET Image Proc."},{"key":"2089_CR51","doi-asserted-by":"publisher","first-page":"1063","DOI":"10.1007\/s12046-017-0673-1","volume":"42","author":"AV Vanmali","year":"2017","unstructured":"Vanmali, A.V., Gadre, V.M.: Visible and nir image fusion using weight-map-guided laplacian-gaussian pyramid for improving scene visibility. S\u0101dhan\u0101 42, 1063\u20131082 (2017)","journal-title":"S\u0101dhan\u0101"},{"issue":"11","key":"2089_CR52","doi-asserted-by":"publisher","first-page":"3111","DOI":"10.1109\/TCSVT.2017.2748150","volume":"28","author":"C-H Son","year":"2017","unstructured":"Son, C.-H., Zhang, X.-P.: Near-infrared fusion via color regularization for haze and color distortion removals. IEEE Trans. Circuits Syst. Video Technol. 28(11), 3111\u20133126 (2017)","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"2089_CR53","doi-asserted-by":"publisher","first-page":"28","DOI":"10.1016\/j.inffus.2021.12.004","volume":"82","author":"L Tang","year":"2022","unstructured":"Tang, L., Yuan, J., Ma, J.: Image fusion in the loop of high-level vision tasks: A semantic-aware real-time infrared and visible image fusion network. Information Fusion 82, 28\u201342 (2022)","journal-title":"Information Fusion"},{"key":"2089_CR54","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.101870","volume":"99","author":"L Tang","year":"2023","unstructured":"Tang, L., Zhang, H., Xu, H., Ma, J.: Rethinking the necessity of image fusion in high-level vision tasks: A practical infrared and visible image fusion network based on progressive semantic injection and scene fidelity. Information Fusion 99, 101870 (2023)","journal-title":"Information Fusion"},{"key":"2089_CR55","doi-asserted-by":"crossref","unstructured":"Liu, J., Liu, Z., Wu, G., Ma, L., Liu, R., Zhong, W., Luo, Z., Fan, X.: Multi-interactive feature learning and a full-time multi-modality benchmark for image fusion and segmentation, in Proc. IEEE\/CVF intl. conf. comput. vis., pp. 8115\u20138124 (2023)","DOI":"10.1109\/ICCV51070.2023.00745"},{"key":"2089_CR56","unstructured":"Zhao, W., Xie, S., Zhao, F., He, Y., Lu, H.: Metafusion: Infrared and visible image fusion via meta-feature embedding from object detection, in Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recogn., pp. 13 955\u201313 965 (2023)"},{"key":"2089_CR57","doi-asserted-by":"crossref","unstructured":"Liu, J., Fan, X., Huang, Z., Wu, G., Liu, R., Zhong, W., Luo, Z.: Target-aware dual adversarial learning and a multi-scenario multi-modality benchmark to fuse infrared and visible for object detection, in Proc. IEEE\/CVF conf. comput. vis. pattern recogn., pp. 5802\u20135811 (2022)","DOI":"10.1109\/CVPR52688.2022.00571"},{"key":"2089_CR58","doi-asserted-by":"crossref","unstructured":"Sun, Y., Cao, B., Zhu, P., Hu, Q.: Detfusion: A detection-driven infrared and visible image fusion network, in Proc. 30th ACM intl. conf. multimed., pp. 4003\u20134011 (2022)","DOI":"10.1145\/3503161.3547902"},{"key":"2089_CR59","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.102039","volume":"102","author":"X Wang","year":"2024","unstructured":"Wang, X., Guan, Z., Qian, W., Cao, J., Liang, S., Yan, J.: Cs2fusion: Contrastive learning for self-supervised infrared and visible image fusion by estimating feature compensation map. Information Fusion 102, 102039 (2024)","journal-title":"Information Fusion"},{"key":"2089_CR60","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2024.128105","volume":"599","author":"M Yu","year":"2024","unstructured":"Yu, M., Cui, T., Lu, H., Yue, Y.: Vifnet: An end-to-end visible-infrared fusion network for image dehazing. Neurocomputing 599, 128105 (2024)","journal-title":"Neurocomputing"},{"key":"2089_CR61","unstructured":"Wei, C., Wang, W., Yang, W., Liu, J.: Deep retinex decomposition for low-light enhancement, arXiv:1808.04560 (2018)"},{"key":"2089_CR62","doi-asserted-by":"crossref","unstructured":"Wu, W., Weng, J., Zhang, P., Wang, X., Yang, W., Jiang, J.: Uretinex-net: Retinex-based deep unfolding network for low-light image enhancement,\u201d in Proc. IEEE\/CVF conf. comput. vis. pattern recogn., pp. 5901\u20135910 (2022)","DOI":"10.1109\/CVPR52688.2022.00581"},{"key":"2089_CR63","unstructured":"Fu, Z., Yang, Y., Tu, X., Huang, Y., Ding, X., Ma, K.-K.: Learning a simple low-light image enhancer from paired low-light instances, in Proc. IEEE\/CVF conf. comput. vis. pattern recogn., pp. 22 252\u201322 261 (2023)"},{"key":"2089_CR64","doi-asserted-by":"crossref","unstructured":"Tan, J., Pei, S., Qin, W., Fu, B., Li, X., Huang, L.: Wavelet-based mamba with fourier adjustment for low-light image enhancement, in Proc. Asian Conf. Comput. Vis., pp. 3449\u20133464 (2024)","DOI":"10.1007\/978-981-96-0911-6_10"},{"key":"2089_CR65","doi-asserted-by":"crossref","unstructured":"Wang, H., Yan, X., Hou, X., Zhang, K., Dun, Y.: Extracting noise and darkness: Low-light image enhancement via dual prior guidance, IEEE Trans. Circuits Syst. Vid. Tech., (2024)","DOI":"10.1109\/TCSVT.2024.3480930"},{"issue":"2","key":"2089_CR66","doi-asserted-by":"publisher","first-page":"982","DOI":"10.1109\/TIP.2016.2639450","volume":"26","author":"X Guo","year":"2016","unstructured":"Guo, X., Li, Y., Ling, H.: Lime: Low-light image enhancement via illumination map estimation. IEEE Trans. Image Process. 26(2), 982\u2013993 (2016)","journal-title":"IEEE Trans. Image Process."},{"issue":"12","key":"2089_CR67","doi-asserted-by":"publisher","first-page":"5372","DOI":"10.1109\/TIP.2013.2284059","volume":"22","author":"C Lee","year":"2013","unstructured":"Lee, C., Lee, C., Kim, C.-S.: Contrast enhancement based on layered difference representation of 2d histograms. IEEE Trans. Image Process. 22(12), 5372\u20135384 (2013)","journal-title":"IEEE Trans. Image Process."},{"key":"2089_CR68","doi-asserted-by":"publisher","first-page":"5310","DOI":"10.1109\/TIP.2023.3315123","volume":"32","author":"X Tan","year":"2023","unstructured":"Tan, X., Chen, H., Zhang, R., Wang, Q., Kan, Y., Zheng, J., Jin, Y., Chen, E.: Deep multi-exposure image fusion for dynamic scenes. IEEE Trans. Image Process. 32, 5310\u20135325 (2023)","journal-title":"IEEE Trans. Image Process."},{"key":"2089_CR69","doi-asserted-by":"crossref","unstructured":"Lee, D.-G., Jeon, M.-H., Cho, Y., Kim, A.: Edge-guided multi-domain rgb-to-tir image translation for training vision tasks with challenging labels, arXiv:2301.12689 (2023)","DOI":"10.1109\/ICRA48891.2023.10161210"},{"key":"2089_CR70","unstructured":"Woo, S., Park, J., Lee, J.-Y., Kweon, I. S.: Cbam: Convolutional block attention module, in Proc. European conf. comput. vis. (ECCV)"},{"key":"2089_CR71","doi-asserted-by":"publisher","first-page":"30","DOI":"10.1016\/j.cviu.2018.10.010","volume":"178","author":"YP Loh","year":"2019","unstructured":"Loh, Y.P., Chan, C.S.: Getting to know low-light images with the exclusively dark dataset. Comput. Vis. Image Underst. 178, 30\u201342 (2019)","journal-title":"Comput. Vis. Image Underst."},{"key":"2089_CR72","unstructured":"Jocher, G., Chaurasia, A., Qiu, J.: Yolo by ultralytics (2023)"},{"key":"2089_CR73","doi-asserted-by":"publisher","first-page":"5737","DOI":"10.1109\/TIP.2020.2981922","volume":"29","author":"W Yang","year":"2020","unstructured":"Yang, W., Yuan, Y., Ren, W., Liu, J., Scheirer, W.J., Wang, Z., Zhang, T., Zhong, Q., Xie, D., Pu, S., et al.: Advancing image understanding in poor visibility environments: A collective benchmark study. IEEE Trans. Image Process. 29, 5737\u20135752 (2020)","journal-title":"IEEE Trans. Image Process."}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-025-02089-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-025-02089-6","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-025-02089-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T04:18:40Z","timestamp":1770783520000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-025-02089-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,4]]},"references-count":73,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,2]]}},"alternative-id":["2089"],"URL":"https:\/\/doi.org\/10.1007\/s00530-025-02089-6","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,12,4]]},"assertion":[{"value":"14 July 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 November 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 December 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have influenced the work presented in this paper. The authors declare no Conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"20"}}