{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,31]],"date-time":"2025-12-31T12:07:21Z","timestamp":1767182841276,"version":"3.37.3"},"reference-count":81,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2021,6,26]],"date-time":"2021-06-26T00:00:00Z","timestamp":1624665600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2021,6,26]],"date-time":"2021-06-26T00:00:00Z","timestamp":1624665600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Machine Vision and Applications"],"published-print":{"date-parts":[[2021,7]]},"DOI":"10.1007\/s00138-021-01223-4","type":"journal-article","created":{"date-parts":[[2021,6,26]],"date-time":"2021-06-26T21:15:48Z","timestamp":1624742148000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["An unsupervised approach for thermal to visible image translation using autoencoder and generative adversarial network"],"prefix":"10.1007","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-7087-6276","authenticated-orcid":false,"given":"Heena","family":"Patel","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6306-0682","authenticated-orcid":false,"given":"Kishor P.","family":"Upla","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2021,6,26]]},"reference":[{"key":"1223_CR1","doi-asserted-by":"publisher","first-page":"52","DOI":"10.1016\/j.infrared.2016.01.009","volume":"76","author":"DP Bavirisetti","year":"2016","unstructured":"Bavirisetti, D.P., Dhuli, R.: Two-scale image fusion of visible and infrared images using saliency detection. Infrared Phys. Technol. 76, 52\u201364 (2016)","journal-title":"Infrared Phys. Technol."},{"key":"1223_CR2","doi-asserted-by":"crossref","unstructured":"Buades, A., Coll, B., Morel, J.M.: A non-local algorithm for image denoising. In: 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR\u201905), IEEE, vol.\u00a02, pp. 60\u201365 (2005)","DOI":"10.1109\/CVPR.2005.38"},{"key":"1223_CR3","doi-asserted-by":"crossref","unstructured":"Choi, Y., Choi, M., Kim, M., Ha, J.W., Kim, S., Choo, J.: Stargan: unified generative adversarial networks for multi-domain image-to-image translation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8789\u20138797 (2018)","DOI":"10.1109\/CVPR.2018.00916"},{"key":"1223_CR4","doi-asserted-by":"crossref","unstructured":"Choi, Y., Kim, N., Hwang, S., Kweon, I.S.: Thermal image enhancement using convolutional neural network. In: 2016 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), IEEE, pp. 223\u2013230 (2016)","DOI":"10.1109\/IROS.2016.7759059"},{"key":"1223_CR5","unstructured":"Cogswell, M., Ahmed, F., Girshick, R., Zitnick, L., Batra, D.: Reducing overfitting in deep networks by decorrelating representations. arXiv preprint arXiv:1511.06068 (2015)"},{"issue":"12","key":"1223_CR6","doi-asserted-by":"publisher","first-page":"B42","DOI":"10.1364\/JOSAA.24.000B42","volume":"24","author":"S Gabarda","year":"2007","unstructured":"Gabarda, S., Crist\u00f3bal, G.: Blind image quality assessment through anisotropy. JOSA A 24(12), B42\u2013B51 (2007)","journal-title":"JOSA A"},{"issue":"7","key":"1223_CR7","doi-asserted-by":"publisher","first-page":"943","DOI":"10.1109\/LSP.2017.2696055","volume":"24","author":"R Gao","year":"2017","unstructured":"Gao, R., Vorobyov, S.A., Zhao, H.: Image fusion with cosparse analysis operator. IEEE Signal Process. Lett. 24(7), 943\u2013947 (2017)","journal-title":"IEEE Signal Process. Lett."},{"key":"1223_CR8","volume-title":"Deep Learning","author":"I Goodfellow","year":"2016","unstructured":"Goodfellow, I., Bengio, Y., Courville, A., Bengio, Y.: Deep Learning, vol. 1. MIT Press, Cambridge (2016)"},{"key":"1223_CR9","unstructured":"Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. In: Advances in Neural Information Processing Systems, pp. 2672\u20132680 (2014)"},{"key":"1223_CR10","doi-asserted-by":"crossref","unstructured":"Guo, Y., Li, Y., Feris, R., Wang, L., Rosing, T.: Depthwise convolution is all you need for learning multiple visual domains, vol. 2. arXiv preprint arXiv:1902.00927 (2019)","DOI":"10.1609\/aaai.v33i01.33018368"},{"key":"1223_CR11","doi-asserted-by":"crossref","unstructured":"Gupta, R.K., Chia, A.Y.S., Rajan, D., Ng, E.S., Zhiyong, H.: Image colorization using similar images. In: Proceedings of the 20th ACM International Conference on Multimedia, ACM, pp. 369\u2013378 (2012)","DOI":"10.1145\/2393347.2393402"},{"key":"1223_CR12","doi-asserted-by":"crossref","unstructured":"Hamam, T., Dordek, Y., Cohen, D.: Single-band infrared texture-based image colorization. In: 2012 IEEE 27th Convention of Electrical and Electronics Engineers in Israel, IEEE, pp. 1\u20135 (2012)","DOI":"10.1109\/EEEI.2012.6377111"},{"key":"1223_CR13","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Delving deep into rectifiers: surpassing human-level performance on imagenet classification. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1026\u20131034 (2015)","DOI":"10.1109\/ICCV.2015.123"},{"key":"1223_CR14","doi-asserted-by":"crossref","unstructured":"Hertzmann, A., Jacobs, C.E., Oliver, N., Curless, B., Salesin, D.H.: Image analogies. In: Proceedings of the 28th Annual Conference on Computer Graphics and Interactive Techniques, ACM, pp. 327\u2013340 (2001)","DOI":"10.1145\/383259.383295"},{"key":"1223_CR15","doi-asserted-by":"publisher","first-page":"640","DOI":"10.1109\/TCI.2020.2965304","volume":"6","author":"R Hou","year":"2020","unstructured":"Hou, R., Zhou, D., Nie, R., Liu, D., Xiong, L., Guo, Y., Yu, C.: Vif-net: an unsupervised framework for infrared and visible image fusion. IEEE Trans. Comput. Imaging 6, 640\u2013651 (2020)","journal-title":"IEEE Trans. Comput. Imaging"},{"key":"1223_CR16","doi-asserted-by":"crossref","unstructured":"Huang, X., Liu, M.Y., Belongie, S., Kautz, J.: Multimodal unsupervised image-to-image translation. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 172\u2013189 (2018)","DOI":"10.1007\/978-3-030-01219-9_11"},{"key":"1223_CR17","doi-asserted-by":"crossref","unstructured":"Hwang, S., Park, J., Kim, N., Choi, Y., So\u00a0Kweon, I.: Multispectral pedestrian detection: Benchmark dataset and baseline. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1037\u20131045 (2015)","DOI":"10.1109\/CVPR.2015.7298706"},{"key":"1223_CR18","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1125\u20131134 (2017)","DOI":"10.1109\/CVPR.2017.632"},{"key":"1223_CR19","doi-asserted-by":"crossref","unstructured":"Kandel, I., Castelli, M.: The effect of batch size on the generalizability of the convolutional neural networks on a histopathology dataset. ICT Express (2020)","DOI":"10.1016\/j.icte.2020.04.010"},{"issue":"4","key":"1223_CR20","first-page":"111","volume":"1","author":"B Karlik","year":"2011","unstructured":"Karlik, B., Olgac, A.V.: Performance analysis of various activation functions in generalized MLP architectures of neural networks. Int. J. Artif. Intell. Expert Syst. 1(4), 111\u2013122 (2011)","journal-title":"Int. J. Artif. Intell. Expert Syst."},{"key":"1223_CR21","unstructured":"Kingma, D.P., Salimans, T., Welling, M.: Variational dropout and the local reparameterization trick. In: Advances in Neural Information Processing Systems, pp. 2575\u20132583 (2015)"},{"issue":"4","key":"1223_CR22","doi-asserted-by":"publisher","first-page":"149","DOI":"10.1145\/2601097.2601101","volume":"33","author":"PY Laffont","year":"2014","unstructured":"Laffont, P.Y., Ren, Z., Tao, X., Qian, C., Hays, J.: Transient attributes for high-level understanding and editing of outdoor scenes. ACM Trans. Gr. 33(4), 149 (2014)","journal-title":"ACM Trans. Gr."},{"key":"1223_CR23","doi-asserted-by":"crossref","unstructured":"Le, Q.V.: Building high-level features using large scale unsupervised learning. In: 2013 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 8595\u20138598 (2013)","DOI":"10.1109\/ICASSP.2013.6639343"},{"key":"1223_CR24","doi-asserted-by":"crossref","unstructured":"Ledig, C., Theis, L., Husz\u00e1r, F., Caballero, J., Cunningham, A., Acosta, A., Aitken, A., Tejani, A., Totz, J., Wang, Z., et\u00a0al.: Photo-realistic single image super-resolution using a generative adversarial network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4681\u20134690 (2017)","DOI":"10.1109\/CVPR.2017.19"},{"key":"1223_CR25","doi-asserted-by":"crossref","unstructured":"Lee, H.Y., Tseng, H.Y., Huang, J.B., Singh, M., Yang, M.H.: Diverse image-to-image translation via disentangled representations. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 35\u201351 (2018)","DOI":"10.1007\/978-3-030-01246-5_3"},{"key":"1223_CR26","doi-asserted-by":"publisher","first-page":"15","DOI":"10.1016\/j.patrec.2018.01.010","volume":"104","author":"C Li","year":"2018","unstructured":"Li, C., Guo, J., Porikli, F., Pang, Y.: Lightennet: a convolutional neural network for weakly illuminated image enhancement. Pattern Recognit. Lett. 104, 15\u201322 (2018)","journal-title":"Pattern Recognit. Lett."},{"key":"1223_CR27","doi-asserted-by":"crossref","unstructured":"Li, C., Wand, M.: Precomputed real-time texture synthesis with Markovian generative adversarial networks. In: European Conference on Computer Vision, Springer, pp. 702\u2013716 (2016)","DOI":"10.1007\/978-3-319-46487-9_43"},{"key":"1223_CR28","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIP.2018.2877829","volume":"28","author":"H Li","year":"2019","unstructured":"Li, H., Wu, X.: Densefuse: a fusion approach to infrared and visible images. IEEE Trans. Image Process. 28, 1\u201310 (2019)","journal-title":"IEEE Trans. Image Process."},{"key":"1223_CR29","doi-asserted-by":"crossref","unstructured":"Li, H., Wu, X., Kittler, J.: Infrared and visible image fusion using a deep learning framework. In: 2018 24th International Conference on Pattern Recognition (ICPR) pp. 2705\u20132710 (2018)","DOI":"10.1109\/ICPR.2018.8546006"},{"key":"1223_CR30","doi-asserted-by":"publisher","first-page":"4733","DOI":"10.1109\/TIP.2020.2975984","volume":"29","author":"H Li","year":"2020","unstructured":"Li, H., Wu, X.J., Kittler, J.: Mdlatlrr: a novel decomposition method for infrared and visible image fusion. IEEE Trans. Image Process. 29, 4733\u20134746 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"1223_CR31","doi-asserted-by":"publisher","first-page":"100","DOI":"10.1016\/j.inffus.2016.05.004","volume":"33","author":"S Li","year":"2017","unstructured":"Li, S., Kang, X., Fang, L., Hu, J., Yin, H.: Pixel-level image fusion: a survey of the state of the art. Inf. Fusion 33, 100\u2013112 (2017)","journal-title":"Inf. Fusion"},{"issue":"4","key":"1223_CR32","doi-asserted-by":"publisher","first-page":"8","DOI":"10.1109\/MGRS.2019.2921780","volume":"7","author":"X Li","year":"2019","unstructured":"Li, X., Feng, R., Guan, X., Shen, H., Zhang, L.: Remote sensing image mosaicking: achievements and challenges. IEEE Geosci. Remote Sens. Mag. 7(4), 8\u201322 (2019)","journal-title":"IEEE Geosci. Remote Sens. Mag."},{"key":"1223_CR33","doi-asserted-by":"publisher","first-page":"443","DOI":"10.1109\/TNNLS.2020.2978389","volume":"32","author":"X Li","year":"2020","unstructured":"Li, X., Zhang, R., Wang, Q., Zhang, H.: Autoencoder constrained clustering with adaptive neighbors. IEEE Trans. Neural Netw. Learn. Syst. 32, 443 (2020)","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"key":"1223_CR34","doi-asserted-by":"crossref","unstructured":"Limmer, M., Lensch, H.P.: Infrared colorization using deep convolutional neural networks. In: 2016 15th IEEE International Conference on Machine Learning and Applications (ICMLA), IEEE, pp. 61\u201368 (2016)","DOI":"10.1109\/ICMLA.2016.0019"},{"key":"1223_CR35","unstructured":"Liu, M.Y., Breuel, T., Kautz, J.: Unsupervised image-to-image translation networks. In: Advances in Neural Information Processing Systems, pp. 700\u2013708 (2017)"},{"key":"1223_CR36","unstructured":"Liu, M.Y., Tuzel, O.: Coupled generative adversarial networks. In: Advances in Neural Information Processing Systems, pp. 469\u2013477 (2016)"},{"key":"1223_CR37","doi-asserted-by":"publisher","first-page":"147","DOI":"10.1016\/j.inffus.2014.09.004","volume":"24","author":"Y Liu","year":"2015","unstructured":"Liu, Y., Liu, S., Wang, Z.: A general framework for image fusion based on multi-scale transform and sparse representation. Inf. Fusion 24, 147\u2013164 (2015)","journal-title":"Inf. Fusion"},{"key":"1223_CR38","doi-asserted-by":"crossref","unstructured":"Long, J., Shelhamer, E., Darrell, T.: Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3431\u20133440 (2015)","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"1223_CR39","doi-asserted-by":"publisher","first-page":"650","DOI":"10.1016\/j.patcog.2016.06.008","volume":"61","author":"KG Lore","year":"2017","unstructured":"Lore, K.G., Akintayo, A., Sarkar, S.: Llnet: a deep autoencoder approach to natural low-light image enhancement. Pattern Recognit. 61, 650\u2013662 (2017)","journal-title":"Pattern Recognit."},{"key":"1223_CR40","doi-asserted-by":"publisher","first-page":"164","DOI":"10.1016\/j.neucom.2016.08.059","volume":"218","author":"G Ma","year":"2016","unstructured":"Ma, G., Yang, X., Zhang, B., Shi, Z.: Multi-feature fusion deep networks. Neurocomputing 218, 164\u2013171 (2016)","journal-title":"Neurocomputing"},{"key":"1223_CR41","doi-asserted-by":"publisher","first-page":"100","DOI":"10.1016\/j.inffus.2016.02.001","volume":"31","author":"J Ma","year":"2016","unstructured":"Ma, J., Chen, C., Li, C., Huang, J.: Infrared and visible image fusion via gradient transfer and total variation minimization. Inf. Fusion 31, 100\u2013109 (2016)","journal-title":"Inf. Fusion"},{"key":"1223_CR42","doi-asserted-by":"publisher","first-page":"153","DOI":"10.1016\/j.inffus.2018.02.004","volume":"45","author":"J Ma","year":"2019","unstructured":"Ma, J., Ma, Y., Li, C.: Infrared and visible image fusion methods and applications: a survey. Inf. Fusion 45, 153\u2013178 (2019)","journal-title":"Inf. Fusion"},{"key":"1223_CR43","doi-asserted-by":"publisher","first-page":"11","DOI":"10.1016\/j.inffus.2018.09.004","volume":"48","author":"J Ma","year":"2019","unstructured":"Ma, J., Yu, W., Liang, P., Li, C., Jiang, J.: Fusiongan: a generative adversarial network for infrared and visible image fusion. Inf. Fusion 48, 11\u201326 (2019)","journal-title":"Inf. Fusion"},{"key":"1223_CR44","doi-asserted-by":"publisher","first-page":"8","DOI":"10.1016\/j.infrared.2017.02.005","volume":"82","author":"J Ma","year":"2017","unstructured":"Ma, J., Zhou, Z., Wang, B., Zong, H.: Infrared and visible image fusion based on visual saliency map and weighted least square optimization. Infrared Phys. Technol. 82, 8\u201317 (2017)","journal-title":"Infrared Phys. Technol."},{"key":"1223_CR45","unstructured":"Maas, A.L., Hannun, A.Y., Ng, A.Y.: Rectifier nonlinearities improve neural network acoustic models. In: Proceedings of the ICML, vol.\u00a030, p.\u00a03 (2013)"},{"issue":"12","key":"1223_CR46","doi-asserted-by":"publisher","first-page":"4695","DOI":"10.1109\/TIP.2012.2214050","volume":"21","author":"A Mittal","year":"2012","unstructured":"Mittal, A., Moorthy, A.K., Bovik, A.C.: No-reference image quality assessment in the spatial domain. IEEE Trans. Image Process. 21(12), 4695\u20134708 (2012)","journal-title":"IEEE Trans. Image Process."},{"issue":"3","key":"1223_CR47","doi-asserted-by":"publisher","first-page":"209","DOI":"10.1109\/LSP.2012.2227726","volume":"20","author":"A Mittal","year":"2012","unstructured":"Mittal, A., Soundararajan, R., Bovik, A.C.: Making a \u201ccompletely blind\u201d image quality analyzer. IEEE Signal Process. Lett. 20(3), 209\u2013212 (2012)","journal-title":"IEEE Signal Process. Lett."},{"key":"1223_CR48","unstructured":"Nair, V., Hinton, G.E.: Rectified linear units improve restricted Boltzmann machines. In: Proceedings of the 27th International Conference on Machine Learning (ICML-10), pp. 807\u2013814 (2010)"},{"key":"1223_CR49","unstructured":"Ohkawa, T., Inoue, N., Kataoka, H., Inoue, N.: Augmented cyclic consistency regularization for unpaired image-to-image translation. arXiv preprint arXiv:2003.00187 (2020)"},{"key":"1223_CR50","doi-asserted-by":"crossref","unstructured":"Pang, H., Zhu, M., Guo, L.: Multifocus color image fusion using quaternion wavelet transform. In: 2012 5th International Congress on Image and Signal Processing (CISP), IEEE, pp. 543\u2013546 (2012)","DOI":"10.1109\/CISP.2012.6469884"},{"key":"1223_CR51","doi-asserted-by":"crossref","unstructured":"Park, T., Liu, M.Y., Wang, T.C., Zhu, J.Y.: Semantic image synthesis with spatially-adaptive normalization. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2337\u20132346 (2019)","DOI":"10.1109\/CVPR.2019.00244"},{"key":"1223_CR52","doi-asserted-by":"crossref","unstructured":"Patel, H., Prajapati, K., Chudasama, V., Upla, K.P.: An approach for fusion of thermal and visible images. In: International Conference on Emerging Technology Trends in Electronics Communication and Networking, Springer, pp. 225\u2013234 (2020)","DOI":"10.1007\/978-981-15-7219-7_19"},{"key":"1223_CR53","doi-asserted-by":"crossref","unstructured":"Pathak, D., Krahenbuhl, P., Donahue, J., Darrell, T., Efros, A.A.: Context encoders: feature learning by inpainting. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2536\u20132544 (2016)","DOI":"10.1109\/CVPR.2016.278"},{"key":"1223_CR54","doi-asserted-by":"crossref","unstructured":"Rajkumar, S., Mouli, C.: Infrared and visible image fusion using entropy and neuro-fuzzy concepts. In: ICT and Critical Infrastructure: Proceedings of the 48th Annual Convention of Computer Society of India, vol. I, Springer, pp. 93\u2013100 (2014)","DOI":"10.1007\/978-3-319-03107-1_11"},{"issue":"8","key":"1223_CR55","doi-asserted-by":"publisher","first-page":"3339","DOI":"10.1109\/TIP.2012.2191563","volume":"21","author":"MA Saad","year":"2012","unstructured":"Saad, M.A., Bovik, A.C., Charrier, C.: Blind image quality assessment: a natural scene statistics approach in the DCT domain. IEEE Trans. Image Process. 21(8), 3339\u20133352 (2012)","journal-title":"IEEE Trans. Image Process."},{"key":"1223_CR56","doi-asserted-by":"crossref","unstructured":"Stout, A., Madineni, K., Tremblay, L., Tane, Z.: The development of synthetic thermal image generation tools and training data at flir. In: Automatic Target Recognition XXIX, International Society for Optics and Photonics, vol. 10988, p. 1098814 (2019)","DOI":"10.1117\/12.2518655"},{"key":"1223_CR57","doi-asserted-by":"crossref","unstructured":"Su\u00e1rez, P.L., Sappa, A.D., Vintimilla, B.X.: Infrared image colorization based on a triplet DCGAN architecture. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 18\u201323 (2017)","DOI":"10.1109\/CVPRW.2017.32"},{"key":"1223_CR58","doi-asserted-by":"crossref","unstructured":"Su\u00e1rez, P.L., Sappa, A.D., Vintimilla, B.X.: Learning to colorize infrared images. In: International Conference on Practical Applications of Agents and Multi-Agent Systems, Springer, pp. 164\u2013172 (2017)","DOI":"10.1007\/978-3-319-61578-3_16"},{"key":"1223_CR59","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., Erhan, D., Vanhoucke, V., Rabinovich, A.: Going deeper with convolutions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1\u20139 (2015)","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"1223_CR60","doi-asserted-by":"crossref","unstructured":"Tao, L., Zhu, C., Song, J., Lu, T., Jia, H., Xie, X.: Low-light image enhancement using CNN and bright channel prior. In: 2017 IEEE International Conference on Image Processing (ICIP), IEEE, pp. 3215\u20133219 (2017)","DOI":"10.1109\/ICIP.2017.8296876"},{"key":"1223_CR61","doi-asserted-by":"crossref","unstructured":"Wang, T.C., Liu, M.Y., Zhu, J.Y., Tao, A., Kautz, J., Catanzaro, B.: High-resolution image synthesis and semantic manipulation with conditional GANs. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8798\u20138807 (2018)","DOI":"10.1109\/CVPR.2018.00917"},{"key":"1223_CR62","doi-asserted-by":"crossref","unstructured":"Wang, X., Nie, R., Guo, X.: Two-scale image fusion of visible and infrared images using guided filter. In: Proceedings of the 7th International Conference on Informatics, Environment, Energy and Applications, ACM, pp. 217\u2013221 (2018)","DOI":"10.1145\/3208854.3208881"},{"issue":"2","key":"1223_CR63","doi-asserted-by":"publisher","first-page":"554","DOI":"10.3390\/app10020554","volume":"10","author":"D Xu","year":"2020","unstructured":"Xu, D., Wang, Y., Xu, S., Zhu, K., Zhang, N., Zhang, X.: Infrared and visible image fusion with a generative adversarial network and a residual network. Appl. Sci. 10(2), 554 (2020)","journal-title":"Appl. Sci."},{"issue":"22","key":"1223_CR64","doi-asserted-by":"publisher","first-page":"4780","DOI":"10.3390\/app9224780","volume":"9","author":"J Yoo","year":"2019","unstructured":"Yoo, J., Eom, H., Choi, Y.S.: Image-to-image translation using a cross-domain auto-encoder and decoder. Appl. Sci. 9(22), 4780 (2019)","journal-title":"Appl. Sci."},{"key":"1223_CR65","doi-asserted-by":"crossref","unstructured":"Zhang, C., Wang, K., An, Y., He, K., Tong, T., Tian, J.: Improved generative adversarial networks using the total gradient loss for the resolution enhancement of fluorescence images. Biomed. Opt. Express 10(9), 4742\u20134756 (2019)","DOI":"10.1364\/BOE.10.004742"},{"key":"1223_CR66","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A.: Colorful image colorization. In: European Conference on Computer Vision, Springer, pp. 649\u2013666 (2016)","DOI":"10.1007\/978-3-319-46487-9_40"},{"key":"1223_CR67","doi-asserted-by":"publisher","DOI":"10.1109\/TFUZZ.2020.3026834","author":"R Zhang","year":"2020","unstructured":"Zhang, R., Li, X.: Regularized regression with fuzzy membership embedding for unsupervised feature selection. IEEE Trans. Fuzzy Syst. (2020). https:\/\/doi.org\/10.1109\/TFUZZ.2020.3026834","journal-title":"IEEE Trans. Fuzzy Syst."},{"key":"1223_CR68","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3069498","author":"R Zhang","year":"2021","unstructured":"Zhang, R., Li, X., Zhang, H., Jiao, Z.: Geodesic multi-class SVM with stiefel manifold embedding. IEEE Trans. Pattern Anal. Mach. Intell. (2021). https:\/\/doi.org\/10.1109\/TPAMI.2021.3069498","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"11","key":"1223_CR69","doi-asserted-by":"publisher","first-page":"2814","DOI":"10.1109\/TFUZZ.2019.2945232","volume":"28","author":"R Zhang","year":"2019","unstructured":"Zhang, R., Li, X., Zhang, H., Nie, F.: Deep fuzzy k-means with adaptive loss and entropy regularization. IEEE Trans. Fuzzy Syst. 28(11), 2814\u20132824 (2019)","journal-title":"IEEE Trans. Fuzzy Syst."},{"key":"1223_CR70","unstructured":"Zhang, R., Tong, H.: Robust principal component analysis with adaptive neighbors. In: NeuIPS (2019)"},{"key":"1223_CR71","doi-asserted-by":"crossref","unstructured":"Zhang, R., Tong, H., Xia, Y., Zhu, Y.: Robust embedded deep k-means clustering. In: Proceedings of the 28th ACM International Conference on Information and Knowledge Management, pp. 1181\u20131190 (2019)","DOI":"10.1145\/3357384.3357985"},{"key":"1223_CR72","doi-asserted-by":"publisher","first-page":"2150","DOI":"10.1109\/TPAMI.2020.3007637","volume":"43","author":"R Zhang","year":"2020","unstructured":"Zhang, R., Zhang, H., Li, X.: Robust multi-task learning with flexible manifold constraint. IEEE Trans. Pattern Anal. Mach. Intell. 43, 2150 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"8","key":"1223_CR73","doi-asserted-by":"publisher","first-page":"1400","DOI":"10.1364\/JOSAA.34.001400","volume":"34","author":"X Zhang","year":"2017","unstructured":"Zhang, X., Ma, Y., Fan, F., Zhang, Y., Huang, J.: Infrared and visible image fusion via saliency analysis and local edge-preserving multi-scale decomposition. JOSA A 34(8), 1400\u20131410 (2017)","journal-title":"JOSA A"},{"key":"1223_CR74","doi-asserted-by":"publisher","first-page":"86","DOI":"10.1016\/j.infrared.2013.11.008","volume":"62","author":"J Zhao","year":"2014","unstructured":"Zhao, J., Chen, Y., Feng, H., Xu, Z., Li, Q.: Infrared image enhancement through saliency feature analysis based on multi-scale decomposition. Infrared Phys. Technol. 62, 86\u201393 (2014)","journal-title":"Infrared Phys. Technol."},{"key":"1223_CR75","doi-asserted-by":"publisher","first-page":"201","DOI":"10.1016\/j.infrared.2017.01.012","volume":"81","author":"J Zhao","year":"2017","unstructured":"Zhao, J., Cui, G., Gong, X., Zang, Y., Tao, S., Wang, D.: Fusion of visible and infrared images using global entropy and gradient constrained regularization. Infrared Phys. Technol. 81, 201\u2013209 (2017)","journal-title":"Infrared Phys. Technol."},{"key":"1223_CR76","volume-title":"Multispectral Image Fusion and Colorization","author":"Y Zheng","year":"2018","unstructured":"Zheng, Y., Blasch, E., Liu, Z.: Multispectral Image Fusion and Colorization, vol. 481. SPIE Press, Bellingham (2018)"},{"key":"1223_CR77","doi-asserted-by":"crossref","unstructured":"Zhou, Y., Berg, T.L.: Learning temporal transformations from time-lapse videos. In: European Conference on Computer Vision, Springer, pp. 262\u2013277 (2016)","DOI":"10.1007\/978-3-319-46484-8_16"},{"key":"1223_CR78","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Kr\u00e4henb\u00fchl, P., Shechtman, E., Efros, A.A.: Generative visual manipulation on the natural image manifold. In: European Conference on Computer Vision, Springer, pp. 597\u2013613 (2016)","DOI":"10.1007\/978-3-319-46454-1_36"},{"key":"1223_CR79","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2223\u20132232 (2017)","DOI":"10.1109\/ICCV.2017.244"},{"key":"1223_CR80","unstructured":"Zhu, J.Y., Zhang, R., Pathak, D., Darrell, T., Efros, A.A., Wang, O., Shechtman, E.: Toward multimodal image-to-image translation. In: Advances in Neural Information Processing Systems, pp. 465\u2013476 (2017)"},{"key":"1223_CR81","doi-asserted-by":"publisher","first-page":"195","DOI":"10.1016\/j.bspc.2017.02.005","volume":"34","author":"JJ Zong","year":"2017","unstructured":"Zong, J.J., Qiu, T.S.: Medical image fusion based on sparse representation of classified image patches. Biomed. Signal Process. Control 34, 195\u2013205 (2017)","journal-title":"Biomed. Signal Process. Control"}],"container-title":["Machine Vision and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-021-01223-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00138-021-01223-4\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00138-021-01223-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,2]],"date-time":"2024-09-02T19:00:45Z","timestamp":1725303645000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00138-021-01223-4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,6,26]]},"references-count":81,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2021,7]]}},"alternative-id":["1223"],"URL":"https:\/\/doi.org\/10.1007\/s00138-021-01223-4","relation":{},"ISSN":["0932-8092","1432-1769"],"issn-type":[{"type":"print","value":"0932-8092"},{"type":"electronic","value":"1432-1769"}],"subject":[],"published":{"date-parts":[[2021,6,26]]},"assertion":[{"value":"19 August 2020","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 May 2021","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 June 2021","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"26 June 2021","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"99"}}