{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,5]],"date-time":"2026-03-05T01:39:28Z","timestamp":1772674768378,"version":"3.50.1"},"reference-count":46,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2023,8,29]],"date-time":"2023-08-29T00:00:00Z","timestamp":1693267200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,8,29]],"date-time":"2023-08-29T00:00:00Z","timestamp":1693267200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2024,5]]},"DOI":"10.1007\/s00371-023-03059-7","type":"journal-article","created":{"date-parts":[[2023,8,29]],"date-time":"2023-08-29T11:02:13Z","timestamp":1693306933000},"page":"3693-3707","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Face image deblurring with feature correction and fusion"],"prefix":"10.1007","volume":"40","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8357-424X","authenticated-orcid":false,"given":"Ma","family":"Long","sequence":"first","affiliation":[]},{"given":"Xu","family":"Yu","sequence":"additional","affiliation":[]},{"given":"Shu","family":"Cong","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Zoujian","sequence":"additional","affiliation":[]},{"given":"Du","family":"Jiangbin","sequence":"additional","affiliation":[]},{"given":"Zhao","family":"Jiayao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,8,29]]},"reference":[{"issue":"2","key":"3059_CR1","first-page":"20","volume":"6","author":"B 
Amos","year":"2016","unstructured":"Amos, B., Ludwiczuk, B., Satyanarayanan, M., et al.: Openface: a general-purpose face recognition library with mobile applications. CMU Sch. Comput. Sci. 6(2), 20 (2016)","journal-title":"CMU Sch. Comput. Sci."},{"key":"3059_CR2","doi-asserted-by":"crossref","unstructured":"Cao, Q., Shen, L., Xie, W., Parkhi, O.M., Zisserman, A.: Vggface2: a dataset for recognising faces across pose and age. In: 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pp. 67\u201374. IEEE (2018)","DOI":"10.1109\/FG.2018.00020"},{"issue":"6","key":"3059_CR3","doi-asserted-by":"publisher","first-page":"801","DOI":"10.1007\/s11263-018-1138-7","volume":"127","author":"GG Chrysos","year":"2019","unstructured":"Chrysos, G.G., Favaro, P., Zafeiriou, S.: Motion deblurring of faces. Int. J. Comput. Vis. 127(6), 801\u2013823 (2019)","journal-title":"Int. J. Comput. Vis."},{"key":"3059_CR4","doi-asserted-by":"crossref","unstructured":"Chrysos, G.G., Zafeiriou, S.: Deep face deblurring. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 69\u201378 (2017)","DOI":"10.1109\/CVPRW.2017.252"},{"key":"3059_CR5","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S.A.: An image is worth $$16\\times 16$$ words: transformers for image recognition at scale. In: International Conference on Learning Representations (2021)"},{"key":"3059_CR6","unstructured":"Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. In: Advances in Neural Information Processing Systems, vol. 
27 (2014)"},{"issue":"5","key":"3059_CR7","doi-asserted-by":"publisher","first-page":"807","DOI":"10.1016\/j.imavis.2009.08.002","volume":"28","author":"R Gross","year":"2010","unstructured":"Gross, R., Matthews, I., Cohn, J., Kanade, T., Baker, S.: Multi-pie. Image Vis. Comput. 28(5), 807\u2013813 (2010)","journal-title":"Image Vis. Comput."},{"key":"3059_CR8","unstructured":"Huang, G.B., Mattar, M., Berg, T., Learned-Miller, E.: Labeled faces in the wild: a database for studying face recognition in unconstrained environments. In: Workshop on Faces in \u2018Real-Life\u2019 Images: Detection, Alignment, and Recognition (2008)"},{"key":"3059_CR9","doi-asserted-by":"crossref","unstructured":"Jin, M., Hirsch, M., Favaro, P.: Learning face deblurring fast and wide. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 858\u20138588. IEEE Computer Society (2018)","DOI":"10.1109\/CVPRW.2018.00118"},{"key":"3059_CR10","doi-asserted-by":"crossref","unstructured":"Johnson, J., Alahi, A., Fei-Fei, L.: Perceptual losses for real-time style transfer and super-resolution. In: European Conference on Computer Vision, pp. 694\u2013711. Springer (2016)","DOI":"10.1007\/978-3-319-46475-6_43"},{"key":"3059_CR11","unstructured":"Karras, T., Aila, T., Laine, S., Lehtinen, J.: Progressive growing of GANs for improved quality, stability, and variation. arXiv preprint arXiv:1710.10196 (2017)"},{"key":"3059_CR12","first-page":"852","volume":"34","author":"T Karras","year":"2021","unstructured":"Karras, T., Aittala, M., Laine, S., H\u00e4rk\u00f6nen, E., Hellsten, J., Lehtinen, J., Aila, T.: Alias-free generative adversarial networks. Adv. Neural. Inf. Process. Syst. 34, 852\u2013863 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"3059_CR13","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4401\u20134410 (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"3059_CR14","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8110\u20138119 (2020)","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"3059_CR15","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"key":"3059_CR16","doi-asserted-by":"crossref","unstructured":"Krishnan, D., Tay, T., Fergus, R.: Blind deconvolution using a normalized sparsity measure. In: CVPR 2011, pp. 233\u2013240. IEEE (2011)","DOI":"10.1109\/CVPR.2011.5995521"},{"key":"3059_CR17","doi-asserted-by":"crossref","unstructured":"Kupyn, O., Martyniuk, T., Wu, J., Wang, Z.: Deblurgan-v2: Deblurring (orders-of-magnitude) faster and better. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8878\u20138887 (2019)","DOI":"10.1109\/ICCV.2019.00897"},{"key":"3059_CR18","doi-asserted-by":"crossref","unstructured":"Lai, W.-S., Huang, J.-B., Hu, Z., Ahuja, N., Yang, M.-H.: A comparative study for single image blind deblurring. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1701\u20131709 (2016)","DOI":"10.1109\/CVPR.2016.188"},{"key":"3059_CR19","doi-asserted-by":"crossref","unstructured":"Le, V., Brandt, J., Lin, Z., Bourdev, L., Huang, T.S.: Interactive facial feature localization. In: European conference on computer vision, pp. 679\u2013692. Springer (2012)","DOI":"10.1007\/978-3-642-33712-3_49"},{"key":"3059_CR20","doi-asserted-by":"crossref","unstructured":"Li, X., Chen, C., Zhou, S., Lin, X., Zuo, W., Zhang, L.: Blind face restoration via deep multi-scale component dictionaries. 
In: European Conference on Computer Vision, pp. 399\u2013415. Springer (2020)","DOI":"10.1007\/978-3-030-58545-7_23"},{"key":"3059_CR21","doi-asserted-by":"crossref","unstructured":"Lin, S., Zhang, J., Pan, J., Liu, Y., Wang, Y., Chen, J., Ren, J.: Learning to deblur face images via sketch synthesis. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a034, pp. 11523\u201311530 (2020)","DOI":"10.1609\/aaai.v34i07.6818"},{"key":"3059_CR22","doi-asserted-by":"crossref","unstructured":"Liu, Z., Luo, P., Wang, X., Tang, X.: Deep learning face attributes in the wild. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 3730\u20133738 (2015)","DOI":"10.1109\/ICCV.2015.425"},{"key":"3059_CR23","doi-asserted-by":"crossref","unstructured":"Ma, C., Huang, J.B., Yang, X., Yang, M.H.: Hierarchical convolutional features for visual tracking. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 3074\u20133082 (2015)","DOI":"10.1109\/ICCV.2015.352"},{"key":"3059_CR24","unstructured":"Mescheder, L., Geiger, A., Nowozin, S.: Which training methods for GANs do actually converge? In: International Conference on Machine Learning, pp. 3481\u20133490. PMLR (2018)"},{"key":"3059_CR25","unstructured":"Mirza, M., Osindero, S.: Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784 (2014)"},{"issue":"11","key":"3059_CR26","doi-asserted-by":"publisher","first-page":"3086","DOI":"10.1109\/TIP.2011.2145386","volume":"20","author":"J Ni","year":"2011","unstructured":"Ni, J., Turaga, P., Patel, V.M., Chellappa, R.: Example-driven manifold priors for image deconvolution. IEEE Trans. Image Process. 20(11), 3086\u20133096 (2011)","journal-title":"IEEE Trans. Image Process."},{"key":"3059_CR27","doi-asserted-by":"crossref","unstructured":"Pan, J., Sun, D., Pfister, H., Yang, M.H.: Blind image deblurring using dark channel prior. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1628\u20131636 (2016)","DOI":"10.1109\/CVPR.2016.180"},{"key":"3059_CR28","doi-asserted-by":"crossref","unstructured":"Pan, J., Zhe, H., Su, Z., Yang, M.H.: Deblurring face images with exemplars. In: European Conference on Computer Vision, pp. 47\u201362. Springer (2014)","DOI":"10.1007\/978-3-319-10584-0_4"},{"issue":"7","key":"3059_CR29","doi-asserted-by":"publisher","first-page":"3426","DOI":"10.1109\/TIP.2016.2571062","volume":"25","author":"W Ren","year":"2016","unstructured":"Ren, W., Cao, X., Pan, J., Guo, X., Zuo, W., Yang, M.-H.: Image deblurring via enhanced low-rank prior. IEEE Trans. Image Process. 25(7), 3426\u20133437 (2016)","journal-title":"IEEE Trans. Image Process."},{"key":"3059_CR30","doi-asserted-by":"crossref","unstructured":"Shen, Z., Lai, W.-S., Xu, T., Kautz, J., Yang, M.-H.: Deep semantic face deblurring. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8260\u20138269 (2018)","DOI":"10.1109\/CVPR.2018.00862"},{"issue":"7","key":"3059_CR31","doi-asserted-by":"publisher","first-page":"1829","DOI":"10.1007\/s11263-019-01288-9","volume":"128","author":"Z Shen","year":"2020","unstructured":"Shen, Z., Lai, W.-S., Xu, T., Kautz, J., Yang, M.-H.: Exploiting semantics for face image deblurring. Int. J. Comput. Vis. 128(7), 1829\u20131846 (2020)","journal-title":"Int. J. Comput. Vis."},{"key":"3059_CR32","unstructured":"Shen, Z., Zhang, M., Zhao, H., Yi, S., Li, H.: Efficient attention: Attention with linear complexities. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 3531\u20133539 (2021)"},{"key":"3059_CR33","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. 
arXiv preprint arXiv:1409.1556 (2014)"},{"issue":"6","key":"3059_CR34","doi-asserted-by":"publisher","first-page":"785","DOI":"10.1007\/s11263-019-01148-6","volume":"127","author":"Y Song","year":"2019","unstructured":"Song, Y., Zhang, J., Gong, L., He, S., Bao, L., Pan, J., Yang, Q., Yang, M.-H.: Joint face hallucination and deblurring via structure generation and detail enhancement. Int. J. Comput. Vis. 127(6), 785\u2013800 (2019)","journal-title":"Int. J. Comput. Vis."},{"key":"3059_CR35","doi-asserted-by":"crossref","unstructured":"Sun, L., Cho, S., Wang, J., Hays, J.: Edge-based blur kernel estimation using patch priors. In: IEEE International Conference on Computational Photography (ICCP), pp. 1\u20138. IEEE (2013)","DOI":"10.1109\/ICCPhot.2013.6528301"},{"key":"3059_CR36","doi-asserted-by":"crossref","unstructured":"Tu, Z., Talebi, H., Zhang, H., Yang, F., Milanfar, P., Bovik, A., Li, Y.: Maxim: Multi-axis MLP for image processing. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5769\u20135780 (2022)","DOI":"10.1109\/CVPR52688.2022.00568"},{"key":"3059_CR37","doi-asserted-by":"crossref","unstructured":"Wang, L., Ouyang, W., Wang, X., Lu, H.: Visual tracking with fully convolutional networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 3119\u20133127 (2015)","DOI":"10.1109\/ICCV.2015.357"},{"key":"3059_CR38","doi-asserted-by":"crossref","unstructured":"Wang, X., Li, Y., Zhang, H., Shan, Y.: Towards real-world blind face restoration with generative facial prior. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9168\u20139178 (2021)","DOI":"10.1109\/CVPR46437.2021.00905"},{"key":"3059_CR39","unstructured":"Xia, Z., Chakrabarti, A.: Training image estimators without image ground truth. In: Advances in Neural Information Processing Systems, vol. 
32 (2019)"},{"key":"3059_CR40","unstructured":"Xiang, S., Li, H.: On the effects of batch and weight normalization in generative adversarial networks. arXiv preprint arXiv:1704.03971 (2017)"},{"key":"3059_CR41","doi-asserted-by":"crossref","unstructured":"Xu, L., Zheng, S., Jia, J.: Unnatural l0 sparse representation for natural image deblurring. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1107\u20131114 (2013)","DOI":"10.1109\/CVPR.2013.147"},{"key":"3059_CR42","doi-asserted-by":"crossref","unstructured":"Xu, X., Sun, D., Pan, J., Zhang, Y., Pfister, H., Yang, M.-H.: Learning to super-resolve blurry face and text images. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 251\u2013260 (2017)","DOI":"10.1109\/ICCV.2017.36"},{"key":"3059_CR43","doi-asserted-by":"publisher","first-page":"6251","DOI":"10.1109\/TIP.2020.2990354","volume":"29","author":"R Yasarla","year":"2020","unstructured":"Yasarla, R., Perazzi, F., Patel, V.M.: Deblurring face images using uncertainty guided multi-stream semantic networks. IEEE Trans. Image Process. 29, 6251\u20136263 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"3059_CR44","doi-asserted-by":"crossref","unstructured":"Zamir, S.\u00a0W., Arora, A., Khan, S., Hayat, M., Khan, F.\u00a0S., Yang, M.-H., Shao, L.: Multi-stage progressive image restoration. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14821\u201314831 (2021)","DOI":"10.1109\/CVPR46437.2021.01458"},{"key":"3059_CR45","doi-asserted-by":"crossref","unstructured":"Zhang, H., Dai, Y., Li, H., Koniusz, P.: Deep stacked hierarchical multi-patch network for image deblurring. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5978\u20135986 (2019)","DOI":"10.1109\/CVPR.2019.00613"},{"key":"3059_CR46","unstructured":"Zhang, R.: Making convolutional networks shift-invariant again. 
In: International Conference on Machine Learning, pp. 7324\u20137334. PMLR (2019)"}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-023-03059-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-023-03059-7\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-023-03059-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,27]],"date-time":"2024-10-27T00:55:28Z","timestamp":1729990528000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-023-03059-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,8,29]]},"references-count":46,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2024,5]]}},"alternative-id":["3059"],"URL":"https:\/\/doi.org\/10.1007\/s00371-023-03059-7","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"value":"0178-2789","type":"print"},{"value":"1432-2315","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,8,29]]},"assertion":[{"value":"9 August 2023","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"29 August 2023","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no relevant financial or non-financial interests to disclose. The authors have no conflicts of interest to declare that are relevant to the content of this article. 
All authors certify that they have no affiliations with or involvement in any organization or entity with any financial interest or non-financial interest in the subject matter or materials discussed in this manuscript. The authors have no financial or proprietary interests in any material discussed in this article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}