{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,4]],"date-time":"2026-02-04T17:36:15Z","timestamp":1770226575239,"version":"3.49.0"},"reference-count":27,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2024,9,13]],"date-time":"2024-09-13T00:00:00Z","timestamp":1726185600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,13]],"date-time":"2024-09-13T00:00:00Z","timestamp":1726185600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["62106214"],"award-info":[{"award-number":["62106214"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1007\/s00530-024-01466-x","type":"journal-article","created":{"date-parts":[[2024,9,13]],"date-time":"2024-09-13T16:10:36Z","timestamp":1726243836000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["CAFIN: cross-attention based face image repair 
network"],"prefix":"10.1007","volume":"30","author":[{"given":"Yaqian","family":"Li","sequence":"first","affiliation":[]},{"given":"Kairan","family":"Li","sequence":"additional","affiliation":[]},{"given":"Haibin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Wenming","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,13]]},"reference":[{"key":"1466_CR1","first-page":"417","volume-title":"Image Inpainting[C]\/\/short for Special Interest Group on GRAPHics and Interactive Techniques (SIGGRAPH)","author":"M Bertalmio","year":"2000","unstructured":"Bertalmio, M., Sapiro, G., Caselles, V., et al.: Image inpainting. Short for Special Interest Group on GRAPHics and Interactive Techniques (SIGGRAPH), pp. 417\u2013424. ACM, New Orleans, United States (2000)"},{"issue":"4","key":"1466_CR2","doi-asserted-by":"publisher","first-page":"436","DOI":"10.1006\/jvci.2001.0487","volume":"12","author":"TF Chan","year":"2001","unstructured":"Chan, T.F., Shen, J.: Nontexture inpainting by curvature-driven diffusions. J. Vis. Commun. Image Represent. 12(4), 436\u2013449 (2001)","journal-title":"J. Vis. Commun. Image Represent."},{"key":"1466_CR3","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational Bayes arXiv org. (2014)"},{"key":"1466_CR4","doi-asserted-by":"crossref","unstructured":"Pathak, D., Krahenbuhl, P., Donahue, J., et al.: Context encoders: feature learning by inpainting. Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2536\u20132544 (2016)","DOI":"10.1109\/CVPR.2016.278"},{"issue":"4","key":"1466_CR5","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3072959.3073659","volume":"36","author":"S Iizuka","year":"2017","unstructured":"Iizuka, S., Simo-Serra, E., Ishikawa, H.: Globally and locally consistent image completion. ACM Trans. Graphics (TOG). 36(4), 1\u201314 (2017)","journal-title":"ACM Trans. 
Graphics (TOG)"},{"key":"1466_CR6","doi-asserted-by":"crossref","unstructured":"Yu, J., Lin, Z., Yang, J., et al.: Generative image inpainting with contextual attention. In: Proceedings of the IEEE conference on computer vision and pattern recognition. piscataway, iEEE,pp. 5505\u20135514 (2018)","DOI":"10.1109\/CVPR.2018.00577"},{"key":"1466_CR7","unstructured":"Nazeri, K., Ng, E., Joseph, T., et al.: Edgeconnect: generative image inpainting with adversarial edge learning. (2019). arXiv preprint arXiv:1901.00212"},{"key":"1466_CR8","doi-asserted-by":"crossref","unstructured":"Li, J., Wang, N., Zhang, L., et al.: Recurrent feature reasoning for image inpainting. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 7760\u20137768 (2020)","DOI":"10.1109\/CVPR42600.2020.00778"},{"key":"1466_CR9","doi-asserted-by":"crossref","unstructured":"Guo, X., Yang, H., Huang, D.: Image inpainting via conditional texture and structure dual generation. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp. 14134\u201314143 (2021)","DOI":"10.1109\/ICCV48922.2021.01387"},{"key":"1466_CR10","doi-asserted-by":"crossref","unstructured":"Guo, Q., Li, X., Juefei-Xu, F., Yu, H., Liu, Y., et al.: Jpgnet: joint predictive filtering and generative network for image inpainting. arXiv e-prints, pp. arXiv- 2107 (2021)","DOI":"10.1145\/3474085.3475170"},{"issue":"6","key":"1466_CR11","first-page":"2997","volume":"27","author":"L Li","year":"2018","unstructured":"Li, L., Paris, S.: Sub-pixel convolutional neural networks for Super-resolution. IEEE Trans. Image Process. 27(6), 2997\u20133009 (2018)","journal-title":"IEEE Trans. Image Process."},{"key":"1466_CR12","doi-asserted-by":"crossref","unstructured":"Zhang, K., Ren, W., Luo, W., Lai, W.-S., Stenger, B., Yang,  M.-H., Li, H.: Deep image deblurring. Surv. 
arXiv Preprint arXiv:2201.10700 (2022)","DOI":"10.1007\/s11263-022-01633-5"},{"issue":"7","key":"1466_CR14","doi-asserted-by":"publisher","first-page":"3266","DOI":"10.1109\/TVCG.2022.3156949","volume":"29","author":"Y Zeng","year":"2023","unstructured":"Zeng, Y., Fu, J., Chao, H., Guo, B.: Aggregated contextual transformations for high-resolution image inpainting. IEEE Trans. Vis. Comput. Graph. 29(7), 3266\u20133280 (2023)","journal-title":"IEEE Trans. Vis. Comput. Graph."},{"key":"1466_CR15","unstructured":"Yu, J., Lin, Z., Yang, J., Shen, X., Lu, X., Huang, T.S.: Free-form image inpainting with gated convolution. In: Proceedings of the IEEE\/CVF international conference on computer vision (ICCV), pp. 4471\u20134480 (2019)"},{"key":"1466_CR16","doi-asserted-by":"crossref","unstructured":"Voo, K.T.R., Jiang, L., Loy, C.C.: Delving into high-quality synthetic face occlusion segmentation datasets. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops (2022)","DOI":"10.1109\/CVPRW56347.2022.00517"},{"issue":"5786","key":"1466_CR18","first-page":"504","volume":"313","author":"GE Hinton","year":"2006","unstructured":"Hinton, G.E., Salakhutdinov, R.: Reducing the dimensionality of data with neural networks. Science. 313(5786), 504\u2013507 (2006)","journal-title":"Science"},{"key":"1466_CR19","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Van Der Maaten, L., et al.: Densely connected convolutional networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. piscataway. IEEE, pp. 4700\u20134708 (2017)","DOI":"10.1109\/CVPR.2017.243"},{"key":"1466_CR20","first-page":"3483","volume":"28","author":"K Sohn","year":"2015","unstructured":"Sohn, K., Lee, H., Yan, X.: Learning structured output representation using deep conditional generative models[J]. Adv. Neural. Inf. Process. Syst. 28, 3483\u20133491 (2015)","journal-title":"Adv. Neural. Inf. Process. 
Syst."},{"issue":"6","key":"1466_CR22","doi-asserted-by":"publisher","first-page":"1074","DOI":"10.1049\/cje.2020.09.008","volume":"29","author":"Z Qiang","year":"2020","unstructured":"Qiang, Z., He, L., Dai, F., et al.: Image inpainting based on improved deep convolutional autoencoder network. Chin. J. Electron. 29(6), 1074\u20131084 (2020)","journal-title":"Chin. J. Electron."},{"key":"1466_CR23","unstructured":"Dai, T., Cai, J., Zhang, Y., Xia, S.T., Zhang, L.: Sub-pixel convolutional neural network for image super-resolution reconstruction. In: 2019 IEEE International Conference on Image Processing (ICIP), pp. 3796\u20133800. IEEE (2019)"},{"key":"1466_CR24","unstructured":"Aitken, A.P., Ledig, C., Theis, L., Caballero, J., Wang, Z., Shi, W.: Checkerboard artifact free sub-pixel convolution: A note on sub-pixel convolution, resize convolution and convolution resize (2017). arXiv preprint arXiv. 1707.02937."},{"key":"1466_CR26","doi-asserted-by":"crossref","unstructured":"Pan, Y., Yao, T., Li, Y., et al.: X-linear attention networks for image captioning. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 10971\u201310980 (2020)","DOI":"10.1109\/CVPR42600.2020.01098"},{"key":"1466_CR27","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., et al.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"issue":"2","key":"1466_CR28","doi-asserted-by":"publisher","first-page":"1489","DOI":"10.1109\/TPAMI.2022.3164083","volume":"45","author":"Y Li","year":"2022","unstructured":"Li, Y., Yao, T., Pan, Y., et al.: Contextual transformer networks for visual recognition. IEEE Trans. Pattern Anal. Mach. Intell. 45(2), 1489\u20131500 (2022)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"key":"1466_CR29","doi-asserted-by":"crossref","unstructured":"Mao, X., Qi, G., Chen, Y., et al.: Towards robust vision transformer. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 12042\u201312051 (2022)","DOI":"10.1109\/CVPR52688.2022.01173"},{"key":"1466_CR30","doi-asserted-by":"crossref","unstructured":"Yao, T., Li, Y., Pan, Y., et al.: Dual vision transformer. IEEE Trans. Pattern Anal. Mach. Intell. (2023)","DOI":"10.1109\/TPAMI.2023.3268446"},{"key":"1466_CR31","doi-asserted-by":"crossref","unstructured":"Poirier-Ginter, Y., Lalonde, J.F.: Robust unsupervised stylegan image restoration. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 22292\u201322301 (2023)","DOI":"10.1109\/CVPR52729.2023.02135"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01466-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01466-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01466-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T18:13:22Z","timestamp":1730139202000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01466-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,13]]},"references-count":27,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2024,10]]}},"alternative-id":["1466"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01466-x","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value
":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,9,13]]},"assertion":[{"value":"29 December 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 August 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 September 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Not applicable.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to participate"}},{"value":"Not applicable.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"The authors declare no competing interests.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"271"}}