{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T10:45:15Z","timestamp":1774435515787,"version":"3.50.1"},"reference-count":21,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2023,9,5]],"date-time":"2023-09-05T00:00:00Z","timestamp":1693872000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,9,5]],"date-time":"2023-09-05T00:00:00Z","timestamp":1693872000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Key Research and Development Plan General Project of Shaanxi Provincial Science and Technology Department","award":["2023-YBGY-032"],"award-info":[{"award-number":["2023-YBGY-032"]}]},{"name":"Key Research and Development Plan General Project of Shaanxi Provincial Science and Technology Department","award":["2023-YBGY-032"],"award-info":[{"award-number":["2023-YBGY-032"]}]},{"name":"Key Research and Development Plan General Project of Shaanxi Provincial Science and Technology Department","award":["2023-YBGY-032"],"award-info":[{"award-number":["2023-YBGY-032"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2024,2]]},"DOI":"10.1007\/s11760-023-02748-z","type":"journal-article","created":{"date-parts":[[2023,9,5]],"date-time":"2023-09-05T07:02:25Z","timestamp":1693897345000},"page":"295-303","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["An end-to-end based on semantic region guidance for infrared and visible image fusion"],"prefix":"10.1007","volume":"18","author":[{"given":"Guijin","family":"Han","sequence":"first","affiliation":[]},{"given":"Xinyuan","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Ya","family":"Huang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,5]]},"reference":[{"key":"2748_CR1","doi-asserted-by":"publisher","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"2748_CR2","doi-asserted-by":"publisher","unstructured":"Huang, G., Liu, Z., Van Der\u00a0Maaten, L., Weinberger, K.Q.: Densely connected convolutional networks. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2261\u20132269 (2017). https:\/\/doi.org\/10.1109\/CVPR.2017.243","DOI":"10.1109\/CVPR.2017.243"},{"key":"2748_CR3","doi-asserted-by":"publisher","unstructured":"Prabhakar, K.R., Srikar, V.S., Babu, R.V.: Deepfuse: a deep unsupervised approach for exposure fusion with extreme exposure image pairs. In: 2017 IEEE International Conference on Computer Vision (ICCV), pp. 4724\u20134732 (2017). https:\/\/doi.org\/10.1109\/ICCV.2017.505","DOI":"10.1109\/ICCV.2017.505"},{"key":"2748_CR4","doi-asserted-by":"publisher","first-page":"72","DOI":"10.1016\/j.inffus.2021.02.023","volume":"73","author":"H Li","year":"2021","unstructured":"Li, H., Wu, X.-J., Kittler, J.: Rfn-nest: an end-to-end residual fusion network for infrared and visible images. Inf. Fusion 73, 72\u201386 (2021). https:\/\/doi.org\/10.1016\/j.inffus.2021.02.023","journal-title":"Inf. Fusion"},{"key":"2748_CR5","doi-asserted-by":"publisher","first-page":"79","DOI":"10.1016\/j.inffus.2022.03.007","volume":"83\u201384","author":"L Tang","year":"2022","unstructured":"Tang, L., Yuan, J., Zhang, H., Jiang, X., Ma, J.: Piafusion: a progressive infrared and visible image fusion network based on illumination aware. Inf. Fusion 83\u201384, 79\u201392 (2022). https:\/\/doi.org\/10.1016\/j.inffus.2022.03.007","journal-title":"Inf. Fusion"},{"key":"2748_CR6","doi-asserted-by":"publisher","first-page":"28","DOI":"10.1016\/j.inffus.2021.12.004","volume":"82","author":"L Tang","year":"2022","unstructured":"Tang, L., Yuan, J., Ma, J.: Image fusion in the loop of high-level vision tasks: a semantic-aware real-time infrared and visible image fusion network. Inf. Fusion 82, 28\u201342 (2022). https:\/\/doi.org\/10.1016\/j.inffus.2021.12.004","journal-title":"Inf. Fusion"},{"key":"2748_CR7","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Xu, S., Zhang, C., Liu, J., Li, P., Zhang, J.: DIDFuse: deep image decomposition for infrared and visible image fusion (2020)","DOI":"10.24963\/ijcai.2020\/135"},{"key":"2748_CR8","unstructured":"Goodfellow, I.J., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial networks (2014)"},{"key":"2748_CR9","doi-asserted-by":"publisher","unstructured":"Ma, J., Yu, W., Liang, P., Li, C., Jiang, J.: Fusiongan: a generative adversarial network for infrared and visible image fusion. Inf. Fusion 48, 11\u201326 (2019). https:\/\/doi.org\/10.1016\/j.inffus.2018.09.004","DOI":"10.1016\/j.inffus.2018.09.004"},{"key":"2748_CR10","doi-asserted-by":"publisher","first-page":"635","DOI":"10.1109\/TMM.2021.3129609","volume":"25","author":"H Zhou","year":"2023","unstructured":"Zhou, H., Wu, W., Zhang, Y., Ma, J., Ling, H.: Semantic-supervised infrared and visible image fusion via a dual-discriminator generative adversarial network. IEEE Trans. Multimed. 25, 635\u2013648 (2023). https:\/\/doi.org\/10.1109\/TMM.2021.3129609","journal-title":"IEEE Trans. Multimed."},{"key":"2748_CR11","doi-asserted-by":"publisher","unstructured":"Liu, J., Fan, X., Huang, Z., Wu, G., Liu, R., Zhong, W., Luo, Z.: Target-aware dual adversarial learning and a multi-scenario multi-modality benchmark to fuse infrared and visible for object detection. In: 2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5792\u20135801 (2022). https:\/\/doi.org\/10.1109\/CVPR52688.2022.00571","DOI":"10.1109\/CVPR52688.2022.00571"},{"key":"2748_CR12","doi-asserted-by":"crossref","unstructured":"Yue, J., Fang, L., Xia, S., Deng, Y., Ma, J.: Dif-fusion: towards high color fidelity in infrared and visible image fusion with diffusion models (2023)","DOI":"10.1109\/TIP.2023.3322046"},{"key":"2748_CR13","unstructured":"Sohl-Dickstein, J., Weiss, E.A., Maheswaranathan, N., Ganguli, S.: Deep unsupervised learning using nonequilibrium thermodynamics (2015)"},{"key":"2748_CR14","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models (2020)"},{"key":"2748_CR15","doi-asserted-by":"publisher","first-page":"334","DOI":"10.1007\/978-3-030-01261-8_20","volume-title":"Computer Vision\u2013ECCV 2018","author":"C Yu","year":"2018","unstructured":"Yu, C., Wang, J., Peng, C., Gao, C., Yu, G., Sang, N.: Bisenet: bilateral segmentation network for real-time semantic segmentation. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) Computer Vision\u2013ECCV 2018, pp. 334\u2013349. Springer, Cham (2018)"},{"key":"2748_CR16","unstructured":"Chen, L.-C., Papandreou, G., Schroff, F., Adam, H.: Rethinking atrous convolution for semantic image segmentation (2017)"},{"key":"2748_CR17","doi-asserted-by":"publisher","unstructured":"Zhang, W., Ma, K., Yan, J., Deng, D., Wang, Z.: Blind image quality assessment using a deep bilinear convolutional neural network. IEEE Trans. Circuits Syst. Video Technol. 30(1), 36\u201347 (2020). https:\/\/doi.org\/10.1109\/TCSVT.2018.2886771","DOI":"10.1109\/TCSVT.2018.2886771"},{"issue":"12","key":"2748_CR18","doi-asserted-by":"publisher","first-page":"4695","DOI":"10.1109\/TIP.2012.2214050","volume":"21","author":"A Mittal","year":"2012","unstructured":"Mittal, A., Moorthy, A.K., Bovik, A.C.: No-reference image quality assessment in the spatial domain. IEEE Trans. Image Process. 21(12), 4695\u20134708 (2012). https:\/\/doi.org\/10.1109\/TIP.2012.2214050","journal-title":"IEEE Trans. Image Process."},{"key":"2748_CR19","unstructured":"Jia, X., Zhu, C., Li, M., Tang, W., Liu, S., Zhou, W.: LLVIP: a visible-infrared paired dataset for low-light vision (2023)"},{"key":"2748_CR20","doi-asserted-by":"publisher","unstructured":"Xu, H., Ma, J., Le, Z., Jiang, J., Guo, X.: Fusiondn: a unified densely connected network for image fusion. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 12484\u201312491 (2020). https:\/\/doi.org\/10.1609\/aaai.v34i07.6936","DOI":"10.1609\/aaai.v34i07.6936"},{"key":"2748_CR21","doi-asserted-by":"publisher","first-page":"249","DOI":"10.1016\/j.dib.2017.09.038","volume":"15","author":"A Toet","year":"2017","unstructured":"Toet, A.: The TNO multiband image data collection. Data Brief 15, 249\u2013251 (2017). https:\/\/doi.org\/10.1016\/j.dib.2017.09.038","journal-title":"Data Brief"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-023-02748-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-023-02748-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-023-02748-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,25]],"date-time":"2024-01-25T10:37:56Z","timestamp":1706179076000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-023-02748-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,9,5]]},"references-count":21,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2024,2]]}},"alternative-id":["2748"],"URL":"https:\/\/doi.org\/10.1007\/s11760-023-02748-z","relation":{"has-preprint":[{"id-type":"doi","id":"10.21203\/rs.3.rs-3154119\/v1","asserted-by":"object"}]},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"value":"1863-1703","type":"print"},{"value":"1863-1711","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,9,5]]},"assertion":[{"value":"9 July 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 August 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 August 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 September 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}},{"value":"Not applicable.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to Participate"}},{"value":"Not applicable.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for Publication"}}]}}