{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,27]],"date-time":"2025-10-27T16:21:41Z","timestamp":1761582101083},"reference-count":23,"publisher":"Institute of Electronics, Information and Communications Engineers (IEICE)","issue":"12","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEICE Trans. Fundamentals"],"published-print":{"date-parts":[[2021,12,1]]},"DOI":"10.1587\/transfun.2021eal2020","type":"journal-article","created":{"date-parts":[[2021,6,9]],"date-time":"2021-06-09T22:08:32Z","timestamp":1623276512000},"page":"1733-1738","source":"Crossref","is-referenced-by-count":6,"title":["Semantic Guided Infrared and Visible Image Fusion"],"prefix":"10.1587","volume":"E104.A","author":[{"given":"Wei","family":"WU","sequence":"first","affiliation":[{"name":"school of Wuhan Institute of Technology, Hubei Key Laboratory of Intelligent Robot"}]},{"given":"Dazhi","family":"ZHANG","sequence":"additional","affiliation":[{"name":"Research Institute of Nuclear Power Operation"}]},{"given":"Jilei","family":"HOU","sequence":"additional","affiliation":[{"name":"school of Wuhan Institute of Technology, Hubei Key Laboratory of Intelligent Robot"}]},{"given":"Yu","family":"WANG","sequence":"additional","affiliation":[{"name":"school of Wuhan Institute of Technology, Hubei Key Laboratory of Intelligent Robot"}]},{"given":"Tao","family":"LU","sequence":"additional","affiliation":[{"name":"school of Wuhan Institute of Technology, Hubei Key Laboratory of Intelligent Robot"}]},{"given":"Huabing","family":"ZHOU","sequence":"additional","affiliation":[{"name":"school of Wuhan Institute of Technology, Hubei Key Laboratory of Intelligent Robot"}]}],"member":"532","reference":[{"key":"1","doi-asserted-by":"publisher","unstructured":"[1] J. Ma, Y. Ma, and C. Li, \u201cInfrared and visible image fusion methods and applications: A survey,\u201d Inf. Fusion, vol.45, pp.153-178, 2019. 10.1016\/j.inffus.2018.02.004","DOI":"10.1016\/j.inffus.2018.02.004"},{"key":"2","doi-asserted-by":"publisher","unstructured":"[2] J. Ma, L. Tang, M. Xu, H. Zhang, and G. Xiao, \u201cSTDFusionNet: An infrared and visible image fusion network based on salient target detection,\u201d IEEE Trans. Instrum. Meas., vol.70, pp.1-13, 2021. 10.1109\/tim.2021.3075747","DOI":"10.1109\/TIM.2021.3075747"},{"key":"3","doi-asserted-by":"publisher","unstructured":"[3] J. Ma, H. Xu, J. Jiang, X. Mei, and X.P. Zhang, \u201cDDcGAN: A dual-discriminator conditional generative adversarial network for multi-resolution image fusion,\u201d IEEE Trans. Image Process., vol.29, pp.4980-4995, 2020. 10.1109\/tip.2020.2977573","DOI":"10.1109\/TIP.2020.2977573"},{"key":"4","doi-asserted-by":"publisher","unstructured":"[4] H. Zhou, J. Ma, C. Yang, S. Sun, R. Liu, and J. Zhao, \u201cNonrigid feature matching for remote sensing images via probabilistic inference with global and local regularizations,\u201d IEEE Geosci. Remote Sens. Lett., vol.13, no.3, pp.374-378, 2016. 10.1109\/lgrs.2016.2514521","DOI":"10.1109\/LGRS.2016.2514521"},{"key":"5","doi-asserted-by":"publisher","unstructured":"[5] H. Zhou, J. Ma, C.C. Tan, Y. Zhang, and H. Ling, \u201cCross-weather image alignment via latent generative model with intensity consistency,\u201d IEEE Trans. Image Process., vol.29, pp.5216-5228, 2020. 10.1109\/tip.2020.2980210","DOI":"10.1109\/TIP.2020.2980210"},{"key":"6","doi-asserted-by":"publisher","unstructured":"[6] J. Ma, H. Zhou, J. Zhao, Y. Gao, J. Jiang, and J. 
Tian, \u201cRobust feature matching for remote sensing image registration via locally linear transforming,\u201d IEEE Trans. Geosci. Remote Sens., vol.53, no.12, pp.6469-6481, 2015. 10.1109\/tgrs.2015.2441954","DOI":"10.1109\/TGRS.2015.2441954"},{"key":"7","doi-asserted-by":"publisher","unstructured":"[7] H. Zhou, A. Dai, T. Tian, Y. Tian, Z. Yu, Y. Wu, and Y. Zhang, \u201cFeature matching for remote sensing image registration via manifold regularization,\u201d IEEE J. Sel. Topics Appl. Earth Observ., vol.13, pp.4564-4574, 2020. 10.1109\/jstars.2020.3015350","DOI":"10.1109\/JSTARS.2020.3015350"},{"key":"8","doi-asserted-by":"publisher","unstructured":"[8] P. Yi, Z. Wang, K. Jiang, Z. Shao, and J. Ma, \u201cMulti-temporal ultra dense memory network for video super-resolution,\u201d IEEE Trans. Circuits Syst. Video Technol., vol.30, no.8, pp.2503-2516, 2020. 10.1109\/tcsvt.2019.2925844","DOI":"10.1109\/TCSVT.2019.2925844"},{"key":"9","doi-asserted-by":"publisher","unstructured":"[9] K. Jiang, Z. Wang, P. Yi, G. Wang, T. Lu, and J. Jiang, \u201cEdge-enhanced gan for remote sensing image superresolution,\u201d IEEE Trans. Geosci. Remote Sens., vol.57, no.8, pp.5799-5812, 2019. 10.1109\/tgrs.2019.2902431","DOI":"10.1109\/TGRS.2019.2902431"},{"key":"10","doi-asserted-by":"publisher","unstructured":"[10] Z. Chen and S. Muramatsu, \u201cMulti-focus image fusion based on multiple directional LOTs,\u201d IEICE Trans. Fundamentals, vol.E98-A, no.11, pp.2360-2365, Nov. 2015. 10.1587\/transfun.e98.a.2360","DOI":"10.1587\/transfun.E98.A.2360"},{"key":"11","doi-asserted-by":"publisher","unstructured":"[11] D. Bulanon, T. Burks, and V. Alchanatis, \u201cImage fusion of visible and thermal images for fruit detection,\u201d Biosyst. Eng., vol.103, no.1, pp.12-22, 2009. 10.1016\/j.biosystemseng.2009.02.009","DOI":"10.1016\/j.biosystemseng.2009.02.009"},{"key":"12","doi-asserted-by":"publisher","unstructured":"[12] S. Li, X. Kang, and J. Hu, \u201cImage fusion with guided filtering,\u201d IEEE Trans. Image Process., vol.22, no.7, pp.2864-2875, 2013. 10.1109\/tip.2013.2244222","DOI":"10.1109\/TIP.2013.2244222"},{"key":"13","doi-asserted-by":"publisher","unstructured":"[13] H. Xu, J. Ma, J. Jiang, X. Guo, and H. Ling, \u201cU2Fusion: A unified unsupervised image fusion network,\u201d IEEE Trans. Pattern Anal. Mach. Intell., 2020. 10.1109\/tpami.2020.3012548","DOI":"10.1109\/TPAMI.2020.3012548"},{"key":"14","doi-asserted-by":"publisher","unstructured":"[14] H. Li and X.J. Wu, \u201cDensefuse: A fusion approach to infrared and visible images,\u201d IEEE Trans. Image Process., vol.28, no.5, pp.2614-2623, 2018. 10.1109\/tip.2018.2887342","DOI":"10.1109\/TIP.2018.2887342"},{"key":"15","doi-asserted-by":"publisher","unstructured":"[15] H. Zhang, H. Xu, Y. Xiao, X. Guo, and J. Ma, \u201cRethinking the image fusion: A fast unified image fusion network based on proportional maintenance of gradient and intensity,\u201d AAAI, vol.34, no.7, pp.12797-12804, 2020. 10.1609\/aaai.v34i07.6975","DOI":"10.1609\/aaai.v34i07.6975"},{"key":"16","doi-asserted-by":"publisher","unstructured":"[16] J. Ma, W. Yu, P. Liang, C. Li, and J. Jiang, \u201cFusiongan: A generative adversarial network for infrared and visible image fusion,\u201d Inf. Fusion, vol.48, pp.11-26, 2019. 10.1016\/j.inffus.2018.09.004","DOI":"10.1016\/j.inffus.2018.09.004"},{"key":"17","doi-asserted-by":"publisher","unstructured":"[17] A. Toet, \u201cImage fusion by a ratio of low-pass pyramid,\u201d Pattern Recognit. Lett., vol.9, no.4, pp.245-253, 1989. 
10.1016\/0167-8655(89)90003-2","DOI":"10.1016\/0167-8655(89)90003-2"},{"key":"18","doi-asserted-by":"publisher","unstructured":"[18] J.J. Lewis, R.J. O'Callaghan, S.G. Nikolov, D.R. Bull, and N. Canagarajah, \u201cPixel- and region-based image fusion with complex wavelets,\u201d Inf. Fusion, vol.8, no.2, pp.119-130, 2007. 10.1016\/j.inffus.2005.09.006","DOI":"10.1016\/j.inffus.2005.09.006"},{"key":"19","doi-asserted-by":"publisher","unstructured":"[19] J. Ma, C. Chen, C. Li, and J. Huang, \u201cInfrared and visible image fusion via gradient transfer and total variation minimization,\u201d Inf. Fusion, vol.31, pp.100-109, 2016. 10.1016\/j.inffus.2016.02.001","DOI":"10.1016\/j.inffus.2016.02.001"},{"key":"20","doi-asserted-by":"crossref","unstructured":"[20] L.C. Chen, Y. Zhu, G. Papandreou, F. Schroff, and H. Adam, \u201cEncoder-decoder with atrous separable convolution for semantic image segmentation,\u201d ECCV, pp.833-851, 2018. 10.1007\/978-3-030-01234-2_49","DOI":"10.1007\/978-3-030-01234-2_49"},{"key":"21","doi-asserted-by":"publisher","unstructured":"[21] Z. Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli, \u201cImage quality assessment: from error visibility to structural similarity,\u201d IEEE Trans. Image Process., vol.13, no.4, pp.600-612, 2004. 10.1109\/tip.2003.819861","DOI":"10.1109\/TIP.2003.819861"},{"key":"22","doi-asserted-by":"publisher","unstructured":"[22] G. Qu, D. Zhang, and P. Yan, \u201cInformation measure for performance of image fusion,\u201d Electron. Lett., vol.38, no.7, pp.313-315, 2002. 10.1049\/el:20020212","DOI":"10.1049\/el:20020212"},{"key":"23","doi-asserted-by":"publisher","unstructured":"[23] L. Liu, B. Liu, H. Huang, and A.C. Bovik, \u201cNo-reference image quality assessment based on spatial and spectral entropies,\u201d Signal Process.: Image Commun., vol.29, no.8, pp.856-863, 2014. 10.1016\/j.image.2014.06.006","DOI":"10.1016\/j.image.2014.06.006"}],"container-title":["IEICE Transactions on Fundamentals of Electronics, Communications and Computer Sciences"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transfun\/E104.A\/12\/E104.A_2021EAL2020\/_pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2021,12,4]],"date-time":"2021-12-04T03:14:04Z","timestamp":1638587644000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transfun\/E104.A\/12\/E104.A_2021EAL2020\/_article"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,1]]},"references-count":23,"journal-issue":{"issue":"12","published-print":{"date-parts":[[2021]]}},"URL":"https:\/\/doi.org\/10.1587\/transfun.2021eal2020","relation":{},"ISSN":["0916-8508","1745-1337"],"issn-type":[{"value":"0916-8508","type":"print"},{"value":"1745-1337","type":"electronic"}],"subject":[],"published":{"date-parts":[[2021,12,1]]},"article-number":"2021EAL2020"}}
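The record above is the JSON that the Crossref REST API returns for GET https://api.crossref.org/works/10.1587/transfun.2021eal2020 ("status", "message-type", and the work payload under "message"). The following is a minimal, illustrative Python sketch, not part of the original record, showing one way to fetch and summarize such a work record; it assumes the requests package and network access, and uses only field names that appear in the record itself ("title", "author", "container-title", "issued", "reference", "DOI").

import requests

DOI = "10.1587/transfun.2021eal2020"  # DOI taken from the record above

def fetch_work(doi: str) -> dict:
    """Fetch a Crossref work record and return its 'message' payload."""
    resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
    resp.raise_for_status()
    body = resp.json()
    # A successful lookup has status "ok" and message-type "work", as above.
    if body.get("status") != "ok" or body.get("message-type") != "work":
        raise ValueError("unexpected Crossref response")
    return body["message"]

def summarize(work: dict) -> None:
    """Print a one-line citation plus the deposited reference list."""
    title = work.get("title", [""])[0]
    journal = work.get("container-title", [""])[0]
    year = work.get("issued", {}).get("date-parts", [[None]])[0][0]
    authors = ", ".join(
        f"{a.get('given', '')} {a.get('family', '')}".strip()
        for a in work.get("author", [])
    )
    print(f'{authors}: "{title}", {journal}, {year}. doi:{work["DOI"]}')
    # Each "reference" entry may carry a resolved DOI and/or an
    # "unstructured" citation string, as in the record above.
    for ref in work.get("reference", []):
        print(f"  [{ref.get('key')}] {ref.get('DOI', ref.get('unstructured', ''))}")

if __name__ == "__main__":
    summarize(fetch_work(DOI))

Running the sketch against this DOI should reproduce the title, author list, journal, 2021 issue date, and the 23 references carried in the record.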