{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T08:18:14Z","timestamp":1776154694121,"version":"3.50.1"},"reference-count":65,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2025,2,4]],"date-time":"2025-02-04T00:00:00Z","timestamp":1738627200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,2,4]],"date-time":"2025-02-04T00:00:00Z","timestamp":1738627200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Comput Vis"],"published-print":{"date-parts":[[2025,7]]},"DOI":"10.1007\/s11263-025-02358-x","type":"journal-article","created":{"date-parts":[[2025,2,4]],"date-time":"2025-02-04T08:32:11Z","timestamp":1738657931000},"page":"3822-3838","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":10,"title":["Contrastive Decoupled Representation Learning and Regularization for Speech-Preserving Facial Expression 
Manipulation"],"prefix":"10.1007","volume":"133","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5848-5624","authenticated-orcid":false,"given":"Tianshui","family":"Chen","sequence":"first","affiliation":[]},{"given":"Jianman","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Zhijing","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Chumei","family":"Qing","sequence":"additional","affiliation":[]},{"given":"Yukai","family":"Shi","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Lin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,4]]},"reference":[{"key":"2358_CR1","doi-asserted-by":"crossref","unstructured":"Abdal, R., Qin, Y., & Wonka, P. (2019). Image2stylegan: How to embed images into the stylegan latent space? In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 4432\u20134441).","DOI":"10.1109\/ICCV.2019.00453"},{"key":"2358_CR2","doi-asserted-by":"crossref","unstructured":"Abdal, R., Qin, Y., & Wonka, P. (2020). Image2stylegan++: How to edit the embedded images? In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 8296\u20138305).","DOI":"10.1109\/CVPR42600.2020.00832"},{"key":"2358_CR3","doi-asserted-by":"crossref","unstructured":"Alaluf, Y., Patashnik, O., & Cohen-Or, D. (2021). Restyle: A residual-based stylegan encoder via iterative refinement. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 6711\u20136720).","DOI":"10.1109\/ICCV48922.2021.00664"},{"key":"2358_CR4","unstructured":"Berndt, D. J., & Clifford, J. (1994). Using dynamic time warping to find patterns in time series. In KDD workshop, Seattle, WA, USA (Vol.\u00a010, pp. 359\u2013370)."},{"key":"2358_CR5","doi-asserted-by":"crossref","unstructured":"Blanz, V., & Vetter, T. (2023). A morphable model for the synthesis of 3d faces. In Seminal graphics papers: Pushing the boundaries (Vol. 2, pp. 
157\u2013164).","DOI":"10.1145\/3596711.3596730"},{"key":"2358_CR6","doi-asserted-by":"crossref","unstructured":"Cao, P., Yang, L., Liu, D., Yang, X., Huang, T., & Song, Q. (2024). What decreases editing capability? domain-specific hybrid refinement for improved gan inversion. In Proceedings of the IEEE\/CVF winter conference on applications of computer vision (pp. 4240\u20134249).","DOI":"10.1109\/WACV57701.2024.00419"},{"key":"2358_CR7","unstructured":"Chen, T., Kornblith, S., Norouzi, M., & Hinton, G. (2020). A simple framework for contrastive learning of visual representations. In International conference on machine learning (pp. 1597\u20131607). PMLR."},{"key":"2358_CR8","doi-asserted-by":"crossref","unstructured":"Chen, T., Lin, J., Yang, Z., Qing, C., & Lin, L. (2024a). Learning adaptive spatial coherent correlations for speech-preserving facial expression manipulation. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 7267\u20137276).","DOI":"10.1109\/CVPR52733.2024.00694"},{"key":"2358_CR9","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-024-02127-2","author":"T Chen","year":"2024","unstructured":"Chen, T., Pu, T., Liu, L., Shi, Y., Yang, Z., & Lin, L. (2024). Heterogeneous semantic transfer for multi-label recognition with partial labels. International Journal of Computer Vision. https:\/\/doi.org\/10.1007\/s11263-024-02127-2","journal-title":"International Journal of Computer Vision"},{"key":"2358_CR10","doi-asserted-by":"crossref","unstructured":"Choi, Y., Choi, M., Kim, M., Ha, J. W., Kim, S., & Choo, J. (2018). Stargan: Unified generative adversarial networks for multi-domain image-to-image translation. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 8789\u20138797).","DOI":"10.1109\/CVPR.2018.00916"},{"key":"2358_CR11","doi-asserted-by":"crossref","unstructured":"Conneau, A., Baevski, A., Collobert, R., Mohamed, A., & Auli, M. (2020). 
Unsupervised cross-lingual representation learning for speech recognition. arXiv preprint arXiv:2006.13979.","DOI":"10.21437\/Interspeech.2021-329"},{"key":"2358_CR12","doi-asserted-by":"publisher","first-page":"14777","DOI":"10.1109\/TPAMI.2023.3308102","volume":"45","author":"Y Dalva","year":"2023","unstructured":"Dalva, Y., Pehlivan, H., Hatipoglu, O. I., Moran, C., & Dundar, A. (2023). Image-to-image translation with disentangled latent vectors for face editing. IEEE Transactions on Pattern Analysis and Machine Intelligence., 45, 14777\u201314788.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence."},{"key":"2358_CR13","doi-asserted-by":"crossref","unstructured":"d\u2019Apolito, S., Paudel, D. P., Huang, Z., Romero, A., & Van\u00a0Gool, L. (2021). Ganmut: Learning interpretable conditional space for gamut of emotions. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 568\u2013577).","DOI":"10.1109\/CVPR46437.2021.00063"},{"key":"2358_CR14","doi-asserted-by":"crossref","unstructured":"Deng, J., Guo, J., Xue, N., & Zafeiriou, S. (2019). Arcface: Additive angular margin loss for deep face recognition. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 4690\u20134699).","DOI":"10.1109\/CVPR.2019.00482"},{"key":"2358_CR15","doi-asserted-by":"crossref","unstructured":"Ding, H., Sricharan, K., & Chellappa, R. (2018). Exprgan: Facial expression editing with controllable expression intensity. In Proceedings of the AAAI conference on artificial intelligence (vol.\u00a032).","DOI":"10.1609\/aaai.v32i1.12277"},{"key":"2358_CR16","doi-asserted-by":"crossref","unstructured":"Ding, Z., Zhang, X., Xia, Z., Jebe, L., Tu, Z., & Zhang, X. (2023). Diffusionrig: Learning personalized priors for facial appearance editing. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 
12736\u201312746).","DOI":"10.1109\/CVPR52729.2023.01225"},{"issue":"1","key":"2358_CR17","doi-asserted-by":"publisher","first-page":"31","DOI":"10.1109\/TBIOM.2021.3049576","volume":"3","author":"MC Doukas","year":"2021","unstructured":"Doukas, M. C., Koujan, M. R., Sharmanska, V., Roussos, A., & Zafeiriou, S. (2021). Head2head++: Deep facial attributes re-targeting. IEEE Transactions on Biometrics, Behavior, and Identity Science, 3(1), 31\u201343.","journal-title":"IEEE Transactions on Biometrics, Behavior, and Identity Science"},{"issue":"4","key":"2358_CR18","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3450626.3459936","volume":"40","author":"Y Feng","year":"2021","unstructured":"Feng, Y., Feng, H., Black, M. J., & Bolkart, T. (2021). Learning an animatable detailed 3d face model from in-the-wild images. ACM Transactions on Graphics (ToG), 40(4), 1\u201313.","journal-title":"ACM Transactions on Graphics (ToG)"},{"key":"2358_CR19","doi-asserted-by":"crossref","unstructured":"Filntisis, P. P., Retsinas, G., Paraperas-Papantoniou, F., Katsamanis, A., Roussos, A., & Maragos, P. (2023). Spectre: Visual speech-informed perceptual 3d facial expression reconstruction from videos. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 5744\u20135754).","DOI":"10.1109\/CVPRW59228.2023.00609"},{"issue":"2","key":"2358_CR20","first-page":"5","volume":"3","author":"E Friesen","year":"1978","unstructured":"Friesen, E., & Ekman, P. (1978). Facial action coding system: A technique for the measurement of facial movement. Palo Alto, 3(2), 5.","journal-title":"Palo Alto"},{"key":"2358_CR21","doi-asserted-by":"crossref","unstructured":"Fu, H., Wang, Z., Gong, K., Wang, K., Chen, T., Li, H., Zeng, H., & Kang, W. (2024). Mimic: Speaking style disentanglement for speech-driven 3d facial animation. In Proceedings of the AAAI conference on artificial intelligence (Vol.\u00a038, pp. 
1770\u20131777).","DOI":"10.1609\/aaai.v38i2.27945"},{"key":"2358_CR22","doi-asserted-by":"crossref","unstructured":"Geng, Z., Cao, C., & Tulyakov, S. (2019). 3d guided fine-grained face manipulation. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 9821\u20139830).","DOI":"10.1109\/CVPR.2019.01005"},{"key":"2358_CR23","doi-asserted-by":"publisher","first-page":"2744","DOI":"10.1007\/s11263-020-01361-8","volume":"128","author":"Z Geng","year":"2020","unstructured":"Geng, Z., Cao, C., & Tulyakov, S. (2020). Towards photo-realistic facial expression manipulation. International Journal of Computer Vision, 128, 2744\u20132761.","journal-title":"International Journal of Computer Vision"},{"key":"2358_CR24","doi-asserted-by":"crossref","unstructured":"Hu, X., Huang, Q., Shi, Z., Li, S., Gao, C., Sun, L., & Li, Q. (2022). Style transformer for image inversion and editing. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 11337\u201311346).","DOI":"10.1109\/CVPR52688.2022.01105"},{"key":"2358_CR25","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J. Y., Zhou, T., & Efros, A. A. (2017). Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 1125\u20131134).","DOI":"10.1109\/CVPR.2017.632"},{"key":"2358_CR26","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., & Aila, T. (2019). A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 4401\u20134410).","DOI":"10.1109\/CVPR.2019.00453"},{"key":"2358_CR27","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., & Aila, T. (2020). Analyzing and improving the image quality of stylegan. 
In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 8110\u20138119).","DOI":"10.1109\/CVPR42600.2020.00813"},{"issue":"5","key":"2358_CR28","doi-asserted-by":"publisher","first-page":"1455","DOI":"10.1007\/s11263-020-01304-3","volume":"128","author":"D Kollias","year":"2020","unstructured":"Kollias, D., Cheng, S., Ververas, E., Kotsia, I., & Zafeiriou, S. (2020). Deep neural network augmentation: Generating faces for affect analysis. International Journal of Computer Vision, 128(5), 1455\u20131484.","journal-title":"International Journal of Computer Vision"},{"key":"2358_CR29","doi-asserted-by":"crossref","unstructured":"Li, B., Ma, T., Zhang, P., Hua, M., Liu, W., He, Q., & Yi, Z. (2023). Reganie: Rectifying gan inversion errors for accurate real image editing. In Proceedings of the AAAI conference on artificial intelligence (Vol.\u00a037, pp. 1269\u20131277).","DOI":"10.1609\/aaai.v37i1.25210"},{"key":"2358_CR30","unstructured":"Lipton, Z. C., & Tripathi, S. (2017). Precise recovery of latent vectors from generative adversarial networks. arXiv preprint arXiv:1702.04782"},{"issue":"12","key":"2358_CR31","doi-asserted-by":"publisher","first-page":"14590","DOI":"10.1109\/TPAMI.2023.3298868","volume":"45","author":"Y Liu","year":"2023","unstructured":"Liu, Y., Li, Q., Deng, Q., Sun, Z., & Yang, M. H. (2023). Gan-based facial attribute manipulation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(12), 14590\u201314610.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2358_CR32","doi-asserted-by":"crossref","unstructured":"Liu, Z., Li, M., Zhang, Y., Wang, C., Zhang, Q., Wang, J., & Nie, Y. (2023b). Fine-grained face swapping via regional gan inversion. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 
8578\u20138587).","DOI":"10.1109\/CVPR52729.2023.00829"},{"issue":"5","key":"2358_CR33","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0196391","volume":"13","author":"SR Livingstone","year":"2018","unstructured":"Livingstone, S. R., & Russo, F. A. (2018). The Ryerson audio-visual database of emotional speech and song (RAVDESS): A dynamic, multimodal set of facial and vocal expressions in North American English. PLoS ONE, 13(5), e0196391.","journal-title":"PLoS ONE"},{"key":"2358_CR34","unstructured":"Loshchilov, I., & Hutter, F. (2017). Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101"},{"key":"2358_CR35","doi-asserted-by":"crossref","unstructured":"Magnusson, I., Sankaranarayanan, A., & Lippman, A. (2021). Invertible frowns: Video-to-video facial emotion translation. arXiv e-prints.","DOI":"10.1145\/3476099.3484317"},{"key":"2358_CR36","doi-asserted-by":"crossref","unstructured":"Papantoniou, F. P., Filntisis, P. P., Maragos, P., & Roussos, A. (2022). Neural emotion director: Speech-preserving semantic control of facial expressions in \u201cin-the-wild\u201d videos. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 18781\u201318790).","DOI":"10.1109\/CVPR52688.2022.01822"},{"key":"2358_CR37","doi-asserted-by":"crossref","unstructured":"Pehlivan, H., Dalva, Y., & Dundar, A. (2023). Styleres: Transforming the residuals for real image editing with stylegan. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 1828\u20131837).","DOI":"10.1109\/CVPR52729.2023.00182"},{"key":"2358_CR38","doi-asserted-by":"crossref","unstructured":"Prajwal, K., Mukhopadhyay, R., Namboodiri, V. P., & Jawahar, C. (2020). A lip sync expert is all you need for speech to lip generation in the wild. In Proceedings of the 28th ACM international conference on multimedia (pp. 
484\u2013492).","DOI":"10.1145\/3394171.3413532"},{"key":"2358_CR39","doi-asserted-by":"publisher","first-page":"698","DOI":"10.1007\/s11263-019-01210-3","volume":"128","author":"A Pumarola","year":"2020","unstructured":"Pumarola, A., Agudo, A., Martinez, A. M., Sanfeliu, A., & Moreno-Noguer, F. (2020). Ganimation: One-shot anatomically consistent facial animation. International Journal of Computer Vision, 128, 698\u2013713.","journal-title":"International Journal of Computer Vision"},{"key":"2358_CR40","unstructured":"Radford, A., Kim, J. W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., & Clark, J., et\u00a0al. (2021). Learning transferable visual models from natural language supervision. In International conference on machine learning (pp. 8748\u20138763). PMLR."},{"key":"2358_CR41","doi-asserted-by":"crossref","unstructured":"Richardson, E., Alaluf, Y., Patashnik, O., Nitzan, Y., Azar, Y., Shapiro, S., & Cohen-Or, D. (2021). Encoding in style: A stylegan encoder for image-to-image translation. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 2287\u20132296).","DOI":"10.1109\/CVPR46437.2021.00232"},{"key":"2358_CR42","doi-asserted-by":"publisher","first-page":"400","DOI":"10.1214\/aoms\/1177729586","volume":"22","author":"H Robbins","year":"1951","unstructured":"Robbins, H., & Monro, S. (1951). A stochastic approximation method. The Annals of Mathematical Statistics, 22, 400\u2013407.","journal-title":"The Annals of Mathematical Statistics"},{"issue":"1","key":"2358_CR43","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3544777","volume":"42","author":"D Roich","year":"2022","unstructured":"Roich, D., Mokady, R., Bermano, A. H., & Cohen-Or, D. (2022). Pivotal tuning for latent-based editing of real images. 
ACM Transactions on Graphics (TOG), 42(1), 1\u201313.","journal-title":"ACM Transactions on Graphics (TOG)"},{"key":"2358_CR44","doi-asserted-by":"crossref","unstructured":"Solanki, G. K., & Roussos, A. (2023). Deep semantic manipulation of facial videos. In European conference on computer vision (pp. 104\u2013120). Berlin: Springer.","DOI":"10.1007\/978-3-031-25075-0_8"},{"key":"2358_CR45","doi-asserted-by":"publisher","first-page":"1400","DOI":"10.1109\/TAFFC.2023.3334511","volume":"15","author":"Z Sun","year":"2023","unstructured":"Sun, Z., Wen, Y. H., Lv, T., Sun, Y., Zhang, Z., Wang, Y., & Liu, Y. J. (2023). Continuously controllable facial expression editing in talking face videos. IEEE Transactions on Affective Computing., 15, 1400\u20131413.","journal-title":"IEEE Transactions on Affective Computing."},{"key":"2358_CR46","doi-asserted-by":"crossref","unstructured":"Tewari, A., Elgharib, M., Bharaj, G., Bernard, F., Seidel, H. P., P\u00e9rez, P., Zollhofer, M., & Theobalt, C. (2020). Stylerig: Rigging stylegan for 3d control over portrait images. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 6142\u20136151).","DOI":"10.1109\/CVPR42600.2020.00618"},{"issue":"4","key":"2358_CR47","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3450626.3459838","volume":"40","author":"O Tov","year":"2021","unstructured":"Tov, O., Alaluf, Y., Nitzan, Y., Patashnik, O., & Cohen-Or, D. (2021). Designing an encoder for stylegan image manipulation. ACM Transactions on Graphics (TOG), 40(4), 1\u201314.","journal-title":"ACM Transactions on Graphics (TOG)"},{"key":"2358_CR48","doi-asserted-by":"crossref","unstructured":"Tripathy, S., Kannala, J., & Rahtu, E. (2020). Icface: interpretable and controllable face reenactment using gans. In Proceedings of the IEEE\/CVF winter conference on applications of computer vision (pp. 
3385\u20133394).","DOI":"10.1109\/WACV45572.2020.9093474"},{"key":"2358_CR49","doi-asserted-by":"crossref","unstructured":"Tzaban, R., Mokady, R., Gal, R., Bermano, A., & Cohen-Or, D. (2022). Stitch it in time: Gan-based facial editing of real videos. In SIGGRAPH Asia 2022 conference papers (pp. 1\u20139).","DOI":"10.1145\/3550469.3555382"},{"key":"2358_CR50","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, \u0141, & Polosukhin, I. (2017). Attention is all you need. In Advances in neural information processing systems (Vol. 30)."},{"issue":"10","key":"2358_CR51","doi-asserted-by":"publisher","first-page":"2629","DOI":"10.1007\/s11263-020-01338-7","volume":"128","author":"E Ververas","year":"2020","unstructured":"Ververas, E., & Zafeiriou, S. (2020). Slidergan: Synthesizing expressive face images by sliding 3d blendshape parameters. International Journal of Computer Vision, 128(10), 2629\u20132650.","journal-title":"International Journal of Computer Vision"},{"key":"2358_CR52","doi-asserted-by":"crossref","unstructured":"Wang, K., Wu, Q., Song, L., Yang, Z., Wu, W., Qian, C., He, R., Qiao, Y., & Loy, C. C. (2020). Mead: A large-scale audio-visual dataset for emotional talking-face generation. In European conference on computer vision (pp. 700\u2013717). Berlin: Springer.","DOI":"10.1007\/978-3-030-58589-1_42"},{"key":"2358_CR53","doi-asserted-by":"crossref","unstructured":"Wang, T., Zhang, Y., Fan, Y., Wang, J., & Chen, Q. (2022). High-fidelity GAN inversion for image attribute editing. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 11379\u201311388).","DOI":"10.1109\/CVPR52688.2022.01109"},{"key":"2358_CR54","doi-asserted-by":"crossref","unstructured":"Wu, Z., Zhu, Z., Du, J., & Bai, X. (2022). CCPL: contrastive coherence preserving loss for versatile style transfer. In European conference on computer vision (pp. 189\u2013206). 
Berlin: Springer.","DOI":"10.1007\/978-3-031-19787-1_11"},{"issue":"3","key":"2358_CR55","doi-asserted-by":"publisher","first-page":"3121","DOI":"10.1109\/TPAMI.2022.3181070","volume":"45","author":"W Xia","year":"2022","unstructured":"Xia, W., Zhang, Y., Yang, Y., Xue, J. H., Zhou, B., & Yang, M. H. (2022). Gan inversion: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(3), 3121\u20133138.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"2358_CR56","doi-asserted-by":"crossref","unstructured":"Xu, Y., AlBahar, B., & Huang, J. B. (2022). Temporally consistent semantic video editing. In European Conference on Computer Vision (pp. 357\u2013374). Berlin: Springer.","DOI":"10.1007\/978-3-031-19784-0_21"},{"key":"2358_CR57","doi-asserted-by":"crossref","unstructured":"Xu, Y., He, S., Wong, K. Y. K., & Luo, P. (2023a). Rigid: Recurrent gan inversion and editing of real face videos. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 13691\u201313701).","DOI":"10.1109\/ICCV51070.2023.01259"},{"issue":"2s","key":"2358_CR58","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3559107","volume":"19","author":"Y Xu","year":"2023","unstructured":"Xu, Y., Yang, Z., Chen, T., Li, K., & Qing, C. (2023b). Progressive transformer machine for natural character reenactment. ACM Transactions on Multimedia Computing, Communications and Applications, 19(2s), 1\u201322.","journal-title":"ACM Transactions on Multimedia Computing, Communications and Applications"},{"key":"2358_CR59","doi-asserted-by":"crossref","unstructured":"Xu, Z., Chen, T., Yang, Z., Qing, C., Shi, Y., & Lin, L. (2024). Self-supervised emotion representation disentanglement for speech-preserving facial expression manipulation. 
In ACM Multimedia.","DOI":"10.1145\/3664647.3681017"},{"issue":"4","key":"2358_CR60","doi-asserted-by":"publisher","first-page":"1336","DOI":"10.1007\/s11263-023-01938-z","volume":"132","author":"N Yang","year":"2024","unstructured":"Yang, N., Luan, X., Jia, H., Han, Z., Li, X., & Tang, Y. (2024). CCR: Facial image editing with continuity, consistency and reversibility. International Journal of Computer Vision, 132(4), 1336\u20131349.","journal-title":"International Journal of Computer Vision"},{"key":"2358_CR61","doi-asserted-by":"crossref","unstructured":"Yang, X., Xu, X., & Chen, Y. (2023). Out-of-domain gan inversion via invertibility decomposition for photo-realistic human face manipulation. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 7492\u20137501).","DOI":"10.1109\/ICCV51070.2023.00689"},{"key":"2358_CR62","doi-asserted-by":"crossref","unstructured":"Yildirim, A. B., Pehlivan, H., Bilecen, B. B., & Dundar, A. (2023). Diverse inpainting and editing with gan inversion. In Proceedings of the IEEE\/CVF international conference on computer vision (pp. 23120\u201323130).","DOI":"10.1109\/ICCV51070.2023.02113"},{"key":"2358_CR63","unstructured":"Zhao, Z., & Patras, I. (2023). Prompting visual-language models for dynamic facial expression recognition. arXiv preprint arXiv:2308.13382"},{"key":"2358_CR64","doi-asserted-by":"publisher","first-page":"2607","DOI":"10.1109\/TPAMI.2023.3310872","volume":"46","author":"J Zhu","year":"2024","unstructured":"Zhu, J., Shen, Y., Xu, Y., Zhao, D., Chen, Q., & Zhou, B. (2024). In-domain gan inversion for faithful reconstruction and editability. IEEE Transactions on Pattern Analysis and Machine Intelligence., 46, 2607\u20132621.","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence."},{"key":"2358_CR65","doi-asserted-by":"crossref","unstructured":"Zhu, J. Y., Park, T., Isola, P., & Efros, A. A. (2017). 
Unpaired image-to-image translation using cycle-consistent adversarial networks. In Proceedings of the IEEE international conference on computer vision (pp. 2223\u20132232).","DOI":"10.1109\/ICCV.2017.244"}],"container-title":["International Journal of Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02358-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11263-025-02358-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11263-025-02358-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,7]],"date-time":"2025-06-07T05:58:46Z","timestamp":1749275926000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11263-025-02358-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2,4]]},"references-count":65,"journal-issue":{"issue":"7","published-print":{"date-parts":[[2025,7]]}},"alternative-id":["2358"],"URL":"https:\/\/doi.org\/10.1007\/s11263-025-02358-x","relation":{},"ISSN":["0920-5691","1573-1405"],"issn-type":[{"value":"0920-5691","type":"print"},{"value":"1573-1405","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,2,4]]},"assertion":[{"value":"8 June 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 January 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 February 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}}]}}