{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T20:28:46Z","timestamp":1774988926343,"version":"3.50.1"},"reference-count":40,"publisher":"Springer Science and Business Media LLC","issue":"6","license":[{"start":{"date-parts":[[2024,11,6]],"date-time":"2024-11-06T00:00:00Z","timestamp":1730851200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,6]],"date-time":"2024-11-06T00:00:00Z","timestamp":1730851200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["92059207, 92359301, 62027901, 81930053, 81227901"],"award-info":[{"award-number":["92059207, 92359301, 62027901, 81930053, 81227901"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"CAS Youth Interdisciplinary Team","award":["JCTD-2021-08"],"award-info":[{"award-number":["JCTD-2021-08"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1007\/s00530-024-01533-3","type":"journal-article","created":{"date-parts":[[2024,11,6]],"date-time":"2024-11-06T16:07:04Z","timestamp":1730909224000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Universal NIR-II fluorescence image enhancement via covariance weighted attention network"],"prefix":"10.1007","volume":"30","author":[{"given":"Xiaoming","family":"Yu","sequence":"first","affiliation":[]},{"given":"Jie","family":"Tian","sequence":"additional","affiliation":[]},{"given":"Zhenhua","family":"Hu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,6]]},"reference":[{"issue":"6","key":"1533_CR1","doi-asserted-by":"publisher","first-page":"2213","DOI":"10.1109\/TMI.2020.2964853","volume":"39","author":"M Cai","year":"2020","unstructured":"Cai, M., Zhang, Z., Shi, X., Hu, Z., Tian, J.: Nir-ii\/nir-i fluorescence molecular tomography of heterogeneous mice based on gaussian weighted neighborhood fused lasso method. IEEE Trans. Med. Imaging 39(6), 2213\u20132222 (2020)","journal-title":"IEEE Trans. Med. Imaging"},{"issue":"10","key":"1533_CR2","doi-asserted-by":"publisher","first-page":"3207","DOI":"10.1109\/TMI.2020.2987640","volume":"39","author":"M Cai","year":"2020","unstructured":"Cai, M., Zhang, Z., Shi, X., Yang, J., Hu, Z., Tian, J.: Non-negative iterative convex refinement approach for accurate and robust reconstruction in cerenkov luminescence tomography. IEEE Trans. Med. Imaging 39(10), 3207\u20133217 (2020)","journal-title":"IEEE Trans. Med. Imaging"},{"issue":"8","key":"1533_CR3","doi-asserted-by":"publisher","first-page":"2531","DOI":"10.1007\/s00259-022-05730-y","volume":"49","author":"Z Zhang","year":"2022","unstructured":"Zhang, Z., He, K., Chi, C., Hu, Z., Tian, J.: Intraoperative fluorescence molecular imaging accelerates the coming of precision surgery in china. Eur. J. Nucl. Med. Mol. Imaging 49(8), 2531\u20132543 (2022)","journal-title":"Eur. J. Nucl. Med. Mol. Imaging"},{"issue":"8","key":"1533_CR4","doi-asserted-by":"publisher","first-page":"2404","DOI":"10.1109\/TBME.2022.3143859","volume":"69","author":"C Cao","year":"2022","unstructured":"Cao, C., Jin, Z., Shi, X., Zhang, Z., Xiao, A., Yang, J., Ji, N., Tian, J., Hu, Z.: First clinical investigation of near-infrared window iia\/iib fluorescence imaging for precise surgical resection of gliomas. IEEE Trans. Biomed. Eng. 69(8), 2404\u20132413 (2022)","journal-title":"IEEE Trans. Biomed. Eng."},{"issue":"3","key":"1533_CR5","doi-asserted-by":"publisher","first-page":"161","DOI":"10.1038\/s44222-022-00017-1","volume":"1","author":"K Wang","year":"2023","unstructured":"Wang, K., Du, Y., Zhang, Z., He, K., Cheng, Z., Yin, L., Dong, D., Li, C., Li, W., Hu, Z., et al.: Fluorescence image-guided tumour surgery. Nature Reviews Bioengineering 1(3), 161\u2013179 (2023)","journal-title":"Nature Reviews Bioengineering"},{"key":"1533_CR6","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1186\/s42492-018-0001-6","volume":"1","author":"Y An","year":"2018","unstructured":"An, Y., Wang, K., Tian, J.: Recent methodology advances in fluorescence molecular tomography. Visual Computing for Industry, Biomedicine, and Art 1, 1\u201311 (2018)","journal-title":"Visual Computing for Industry, Biomedicine, and Art"},{"issue":"2","key":"1533_CR7","doi-asserted-by":"publisher","first-page":"381","DOI":"10.1109\/TMI.2015.2475356","volume":"35","author":"P Mohajerani","year":"2015","unstructured":"Mohajerani, P., Ntziachristos, V.: An inversion scheme for hybrid fluorescence molecular tomography using a fuzzy inference system. IEEE Trans. Med. Imaging 35(2), 381\u2013390 (2015)","journal-title":"IEEE Trans. Med. Imaging"},{"issue":"5","key":"1533_CR8","doi-asserted-by":"publisher","first-page":"629","DOI":"10.1038\/s41551-021-00773-2","volume":"6","author":"B Chang","year":"2022","unstructured":"Chang, B., Li, D., Ren, Y., Qu, C., Shi, X., Liu, R., Liu, H., Tian, J., Hu, Z., Sun, T., et al.: A phosphorescent probe for in vivo imaging in the second near-infrared window. Nature Biomedical Engineering 6(5), 629\u2013639 (2022)","journal-title":"Nature Biomedical Engineering"},{"issue":"24","key":"1533_CR9","doi-asserted-by":"publisher","first-page":"1900321","DOI":"10.1002\/adma.201900321","volume":"31","author":"S Zhu","year":"2019","unstructured":"Zhu, S., Tian, R., Antaris, A.L., Chen, X., Dai, H.: Near-infrared-ii molecular dyes for cancer imaging and surgery. Adv. Mater. 31(24), 1900321 (2019)","journal-title":"Adv. Mater."},{"issue":"3","key":"1533_CR10","doi-asserted-by":"publisher","first-page":"259","DOI":"10.1038\/s41551-019-0494-0","volume":"4","author":"Z Hu","year":"2020","unstructured":"Hu, Z., Fang, C., Li, B., Zhang, Z., Cao, C., Cai, M., Su, S., Sun, X., Shi, X., Li, C., et al.: First-in-human liver-tumour surgery guided by multispectral fluorescence imaging in the visible and near-infrared-i\/ii windows. Nature biomedical engineering 4(3), 259\u2013271 (2020)","journal-title":"Nature biomedical engineering"},{"issue":"1","key":"1533_CR11","doi-asserted-by":"publisher","first-page":"47","DOI":"10.1038\/nnano.2006.170","volume":"2","author":"Z Liu","year":"2007","unstructured":"Liu, Z., Cai, W., He, L., Nakayama, N., Chen, K., Sun, X., Chen, X., Dai, H.: In vivo biodistribution and highly efficient tumour targeting of carbon nanotubes in mice. Nat. Nanotechnol. 2(1), 47\u201352 (2007)","journal-title":"Nat. Nanotechnol."},{"key":"1533_CR12","doi-asserted-by":"crossref","unstructured":"Huang, X., Liu, M.-Y., Belongie, S., Kautz, J.: Multimodal unsupervised image-to-image translation. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 172\u2013189 (2018)","DOI":"10.1007\/978-3-030-01219-9_11"},{"key":"1533_CR13","doi-asserted-by":"crossref","unstructured":"Chang, H.-Y., Wang, Z., Chuang, Y.-Y.: Domain-specific mappings for generative adversarial style transfer. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part VIII 16, pp. 573\u2013589 (2020). Springer","DOI":"10.1007\/978-3-030-58598-3_34"},{"key":"1533_CR14","doi-asserted-by":"crossref","unstructured":"Huang, X., Belongie, S.: Arbitrary style transfer in real-time with adaptive instance normalization. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 1501\u20131510 (2017)","DOI":"10.1109\/ICCV.2017.167"},{"key":"1533_CR15","doi-asserted-by":"crossref","unstructured":"Park, D.Y., Lee, K.H.: Arbitrary style transfer with style-attentional networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5880\u20135888 (2019)","DOI":"10.1109\/CVPR.2019.00603"},{"key":"1533_CR16","doi-asserted-by":"crossref","unstructured":"Gatys, L.A., Ecker, A.S., Bethge, M.: Image style transfer using convolutional neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2414\u20132423 (2016)","DOI":"10.1109\/CVPR.2016.265"},{"key":"1533_CR17","doi-asserted-by":"crossref","unstructured":"Li, X., Liu, S., Kautz, J., Yang, M.-H.: Learning linear transformations for fast image and video style transfer. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3809\u20133817 (2019)","DOI":"10.1109\/CVPR.2019.00393"},{"key":"1533_CR18","doi-asserted-by":"crossref","unstructured":"Deng, Y., Tang, F., Dong, W., Sun, W., Huang, F., Xu, C.: Arbitrary style transfer via multi-adaptation network. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 2719\u20132727 (2020)","DOI":"10.1145\/3394171.3414015"},{"issue":"2","key":"1533_CR19","doi-asserted-by":"publisher","first-page":"96","DOI":"10.1007\/s00530-024-01300-4","volume":"30","author":"B Ge","year":"2024","unstructured":"Ge, B., Hu, Z., Xia, C., Guan, J.: Arbitrary style transfer method with attentional feature distribution matching. Multimedia Syst. 30(2), 96 (2024)","journal-title":"Multimedia Syst."},{"key":"1533_CR20","doi-asserted-by":"crossref","unstructured":"Lu, Z., Li, J., Liu, H., Huang, C., Zhang, L., Zeng, T.: Transformer for single image super-resolution. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 457\u2013466 (2022)","DOI":"10.1109\/CVPRW56347.2022.00061"},{"issue":"2","key":"1533_CR21","doi-asserted-by":"publisher","first-page":"593","DOI":"10.1109\/TCE.2007.381734","volume":"53","author":"M Abdullah-Al-Wadud","year":"2007","unstructured":"Abdullah-Al-Wadud, M., Kabir, M.H., Dewan, M.A.A., Chae, O.: A dynamic histogram equalization for image contrast enhancement. IEEE Trans. Consum. Electron. 53(2), 593\u2013600 (2007)","journal-title":"IEEE Trans. Consum. Electron."},{"key":"1533_CR22","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.-Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1125\u20131134 (2017)","DOI":"10.1109\/CVPR.2017.632"},{"key":"1533_CR23","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Li, K., Li, K., Fu, Y.: Mr image super-resolution with squeeze and excitation reasoning attention network. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13425\u201313434 (2021)","DOI":"10.1109\/CVPR46437.2021.01322"},{"key":"1533_CR24","unstructured":"Luthra, A., Sulakhe, H., Mittal, T., Iyer, A., Yadav, S.: Eformer: Edge enhancement based transformer for medical image denoising. arXiv preprint arXiv:2109.08044 (2021)"},{"key":"1533_CR25","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Zhu, Y., Nichols, E., Wang, Q., Zhang, S., Smith, C., Howard, S.: A poisson-gaussian denoising dataset with real fluorescence microscopy images. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11710\u201311718 (2019)","DOI":"10.1109\/CVPR.2019.01198"},{"issue":"12","key":"1533_CR26","doi-asserted-by":"publisher","first-page":"1407","DOI":"10.1038\/s41551-022-00952-9","volume":"6","author":"KB Ozyoruk","year":"2022","unstructured":"Ozyoruk, K.B., Can, S., Darbaz, B., Ba\u015fak, K., Demir, D., Gokceler, G.I., Serin, G., Hacisalihoglu, U.P., Kurtulu\u015f, E., Lu, M.Y., et al.: A deep-learning model for transforming the style of tissue images from cryosectioned to formalin-fixed and paraffin-embedded. Nature Biomedical Engineering 6(12), 1407\u20131419 (2022)","journal-title":"Nature Biomedical Engineering"},{"issue":"1","key":"1533_CR27","doi-asserted-by":"publisher","first-page":"2021446118","DOI":"10.1073\/pnas.2021446118","volume":"118","author":"Z Ma","year":"2021","unstructured":"Ma, Z., Wang, F., Wang, W., Zhong, Y., Dai, H.: Deep learning for in vivo near-infrared imaging. Proc. Natl. Acad. Sci. 118(1), 2021446118 (2021)","journal-title":"Proc. Natl. Acad. Sci."},{"key":"1533_CR28","doi-asserted-by":"crossref","unstructured":"Mathew, S., Nadeem, S., Kumari, S., Kaufman, A.: Augmenting colonoscopy using extended and directional cyclegan for lossy image translation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4696\u20134705 (2020)","DOI":"10.1109\/CVPR42600.2020.00475"},{"key":"1533_CR29","doi-asserted-by":"crossref","unstructured":"Johnson, J., Alahi, A., Fei-Fei, L.: Perceptual losses for real-time style transfer and super-resolution. In: Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pp. 694\u2013711 (2016). Springer","DOI":"10.1007\/978-3-319-46475-6_43"},{"key":"1533_CR30","doi-asserted-by":"crossref","unstructured":"Zhang, H., Dana, K.: Multi-style generative network for real-time transfer. In: Proceedings of the European Conference on Computer Vision (ECCV) Workshops, pp. 0\u20130 (2018)","DOI":"10.1007\/978-3-030-11018-5_32"},{"key":"1533_CR31","doi-asserted-by":"crossref","unstructured":"Jing, Y., Liu, X., Ding, Y., Wang, X., Ding, E., Song, M., Wen, S.: Dynamic instance normalization for arbitrary style transfer. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, pp. 4369\u20134376 (2020)","DOI":"10.1609\/aaai.v34i04.5862"},{"key":"1533_CR32","doi-asserted-by":"crossref","unstructured":"Xu, W., Long, C., Wang, R., Wang, G.: Drb-gan: A dynamic resblock generative adversarial network for artistic style transfer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6383\u20136392 (2021)","DOI":"10.1109\/ICCV48922.2021.00632"},{"key":"1533_CR33","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Li, M., Li, R., Jia, K., Zhang, L.: Exact feature distribution matching for arbitrary style transfer and domain generalization. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8035\u20138045 (2022)","DOI":"10.1109\/CVPR52688.2022.00787"},{"key":"1533_CR34","doi-asserted-by":"crossref","unstructured":"Liu, S., Lin, T., He, D., Li, F., Wang, M., Li, X., Sun, Z., Li, Q., Ding, E.: Adaattn: Revisit attention mechanism in arbitrary neural style transfer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6649\u20136658 (2021)","DOI":"10.1109\/ICCV48922.2021.00658"},{"key":"1533_CR35","doi-asserted-by":"crossref","unstructured":"Ma, Z., Lin, T., Li, X., Li, F., He, D., Ding, E., Wang, N., Gao, X.: Dual-affinity style embedding network for semantic-aligned image style transfer. IEEE transactions on neural networks and learning systems (2022)","DOI":"10.1109\/TNNLS.2022.3143356"},{"key":"1533_CR36","doi-asserted-by":"crossref","unstructured":"Deng, Y., Tang, F., Dong, W., Huang, H., Ma, C., Xu, C.: Arbitrary video style transfer via multi-channel correlation. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, pp. 1210\u20131217 (2021)","DOI":"10.1609\/aaai.v35i2.16208"},{"key":"1533_CR37","doi-asserted-by":"crossref","unstructured":"Wang, Z., Zhao, L., Zuo, Z., Li, A., Chen, H., Xing, W., Lu, D.: Microast: Towards super-fast ultra-resolution arbitrary style transfer. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, pp. 2742\u20132750 (2023)","DOI":"10.1609\/aaai.v37i3.25374"},{"key":"1533_CR38","doi-asserted-by":"crossref","unstructured":"Wu, Z., Zhu, Z., Du, J., Bai, X.: Ccpl: Contrastive coherence preserving loss for versatile style transfer. In: European Conference on Computer Vision, pp. 189\u2013206 (2022). Springer","DOI":"10.1007\/978-3-031-19787-1_11"},{"key":"1533_CR39","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)"},{"key":"1533_CR40","unstructured":"Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01533-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01533-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01533-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,16]],"date-time":"2024-12-16T09:11:56Z","timestamp":1734340316000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01533-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,6]]},"references-count":40,"journal-issue":{"issue":"6","published-print":{"date-parts":[[2024,12]]}},"alternative-id":["1533"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01533-3","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,6]]},"assertion":[{"value":"24 May 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 October 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"6 November 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"The datasets are open access in the relevant paper. We only use the datasets for the academic research of this paper and cite relevant paper in the main text.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical and informed consent for data used"}}],"article-number":"335"}}