{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,8]],"date-time":"2026-02-08T08:56:29Z","timestamp":1770540989062,"version":"3.49.0"},"reference-count":66,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2025,2,1]],"date-time":"2025-02-01T00:00:00Z","timestamp":1738368000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,2,1]],"date-time":"2025-02-01T00:00:00Z","timestamp":1738368000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,2,1]],"date-time":"2025-02-01T00:00:00Z","timestamp":1738368000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Guangdong International Technology Cooperation Project","award":["2022A0505050009"],"award-info":[{"award-number":["2022A0505050009"]}]},{"name":"China National Key R&#x0026;D Program","award":["2023YFE0202700"],"award-info":[{"award-number":["2023YFE0202700"]}]},{"name":"Key-Area Research and Development Program of Guangzhou City","award":["2023B01J0022"],"award-info":[{"award-number":["2023B01J0022"]}]},{"name":"Guangdong Natural Science Funds for Distinguished Young Scholar","award":["2023B1515020097"],"award-info":[{"award-number":["2023B1515020097"]}]},{"name":"Singapore MOE Tier 1 Funds","award":["MSS23C002"],"award-info":[{"award-number":["MSS23C002"]}]},{"name":"National Research Foundation Singapore under the AI Singapore Programme","award":["AISG3-GV-2023-011"],"award-info":[{"award-number":["AISG3-GV-2023-011"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Visual. Comput. 
Graphics"],"published-print":{"date-parts":[[2025,2]]},"DOI":"10.1109\/tvcg.2024.3364162","type":"journal-article","created":{"date-parts":[[2024,2,9]],"date-time":"2024-02-09T18:31:34Z","timestamp":1707503494000},"page":"1465-1477","source":"Crossref","is-referenced-by-count":6,"title":["Learning an Interpretable Stylized Subspace for 3D-Aware Animatable Artforms"],"prefix":"10.1109","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0009-0006-0344-2439","authenticated-orcid":false,"given":"Chenxi","family":"Zheng","sequence":"first","affiliation":[{"name":"School of Computer Science and Engineering, South China University of Technology, Guangzhou, Guangdong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6621-0594","authenticated-orcid":false,"given":"Bangzhen","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, South China University of Technology, Guangzhou, Guangdong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8006-3663","authenticated-orcid":false,"given":"Xuemiao","family":"Xu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, South China University of Technology, Guangzhou, Guangdong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7662-9831","authenticated-orcid":false,"given":"Huaidong","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Future Technology, South China University of Technology, Guangzhou, Guangdong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3802-4644","authenticated-orcid":false,"given":"Shengfeng","family":"He","sequence":"additional","affiliation":[{"name":"School of Computing and Information Systems, Singapore Management University, Singapore"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3386569.3392469"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01565"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00574"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.296"},{"key":"ref5","first-page":"26561","article-title":"Artistic style transfer with internal-external learning and contrastive learning","volume":"34","author":"Chen","year":"2021","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58539-6_37"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19787-1_25"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01041"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2019.00038"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19784-0_37"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1145\/3528223.3530164"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.265"},{"key":"ref13","first-page":"1","article-title":"StyleNeRF: A style-based 3D aware generator for high-resolution image synthesis","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Gu"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2021.3114308"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.167"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01780"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3450626.3459860"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3550469.3555422"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1603.08155"},{"key":"ref20","first-page":"12104","article-title":"Training generative adversarial networks with limited data","volume":"33","author":"Karras","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3550469.3555416"},{"key":"ref23","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00658"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.740"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01395"},{"key":"ref27","first-page":"3481","article-title":"Which training methods for GANs do actually converge?","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Mescheder"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_24"},{"key":"ref29","article-title":"Few-shot cross-domain image generation via inference-time latent-code learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Mondal"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-950"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1145\/3528223.3530107"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01060"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01314"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58545-7_19"},{"key":"ref35","article-title":"Resolution dependent GAN interpolation for controllable image synthesis between domains","author":"Pinkney","year":"2020"},{"key":"ref36","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01350"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00232"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/3544777"},{"key":"ref40","first-page":"20154","article-title":"GRAF: Generative radiance fields for 3D-aware image synthesis","volume":"33","author":"Schwarz","year":"2020","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref41","first-page":"33999","article-title":"VoxGRAF: Fast 3D-aware image synthesis with sparse voxel grids","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Schwarz"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2022.3146000"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2021.3067201"},{"key":"ref44","first-page":"7137","article-title":"First order motion model for image animation","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Siarohin"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01344"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/cvprw63382.2024.00081"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19790-1_1"},{"key":"ref48","first-page":"20625","article-title":"Improved StyleGAN-v2 based inversion for out-of-distribution images","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Subramanyam"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1145\/3450626.3459838"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2021.3139913"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2023.3283400"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01109"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2024.3449075"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2003.819861"},{"key":"ref55","first-page":"36188","article-title":"AniFaceGAN: Animatable 3D-aware face image generation for video avatars","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Wu"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2022.3228707"},{"key":"ref57","first-page":"22","article-title":"Object re-identification using teacher-like and light students","volume-title":"Proc. Brit. Mach. Vis. Conf.","author":"Xie"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01536"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00754"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00231"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19821-2_41"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00068"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-88693-8_59"},{"key":"ref64","first-page":"37297","article-title":"Towards diverse and faithful one-shot adaption of generative adversarial networks","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Zhang"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20065-6_2"},{"key":"ref66","article-title":"Mind the gap: Domain gap control for single shot domain adaptation for generative adversarial networks","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhu"}],"container-title":["IEEE Transactions on Visualization and Computer Graphics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/2945\/10829748\/10430412.pdf?arnumber=10430412","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,14]],"date-time":"2025-01-14T19:48:47Z","timestamp":1736884127000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10430412\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2]]},"references-count":66,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tvcg.2024.3364162","relation":{},"ISSN":["1077-2626","1941-0506","2160-9306"],"issn-type":[{"value":"1077-2626","type":"print"},{"value":"1941-0506","type":"electronic"},{"value":"2160-9306","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,2]]}}}