{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,25]],"date-time":"2025-11-25T06:59:12Z","timestamp":1764053952028,"version":"3.40.3"},"publisher-location":"Cham","reference-count":76,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031729829"},{"type":"electronic","value":"9783031729836"}],"license":[{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72983-6_21","type":"book-chapter","created":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T09:34:20Z","timestamp":1730108060000},"page":"363-381","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["StructLDM: Structured Latent Diffusion for\u00a03D Human Generation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3741-3701","authenticated-orcid":false,"given":"Tao","family":"Hu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2412-1141","authenticated-orcid":false,"given":"Fangzhou","family":"Hong","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4220-5958","authenticated-orcid":false,"given":"Ziwei","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,29]]},"reference":[{"key":"21_CR1","doi-asserted-by":"crossref","unstructured":"Abdal, 
R., et al.: Gaussian shell maps for efficient 3D human generation (2023)","DOI":"10.1109\/CVPR52733.2024.00902"},{"key":"21_CR2","unstructured":"Bergman, A.W., Kellnhofer, P., Wang, Y., Chan, E., Lindell, D.B., Wetzstein, G.: Generative neural articulated radiance fields. arXiv abs\/2206.14314 (2022). https:\/\/api.semanticscholar.org\/CorpusID:250113850"},{"key":"21_CR3","unstructured":"Bergman, A.W., Kellnhofer, P., Wang, Y., Chan, E.R., Lindell, D.B., Wetzstein, G.: Generative neural articulated radiance fields. arXiv preprint arXiv:2206.14314 (2022)"},{"key":"21_CR4","doi-asserted-by":"crossref","unstructured":"Cao, Y., Cao, Y.P., Han, K., Shan, Y., Wong, K.Y.K.: Dreamavatar: text-and-shape guided 3D human avatar generation via diffusion models. arXiv preprint arXiv:2304.00916 (2023)","DOI":"10.1109\/CVPR52733.2024.00097"},{"key":"21_CR5","unstructured":"Cao, Y., Cao, Y.P., Han, K., Shan, Y., Wong, K.Y.K.: Guide3D: create 3D avatars from text and image guidance. arXiv preprint arXiv:2308.09705 (2023)"},{"key":"21_CR6","doi-asserted-by":"crossref","unstructured":"Chan, E., et al.: Efficient geometry-aware 3D generative adversarial networks. arXiv abs\/2112.07945 (2021)","DOI":"10.1109\/CVPR52688.2022.01565"},{"key":"21_CR7","doi-asserted-by":"crossref","unstructured":"Chan, E.R., et\u00a0al.: Efficient geometry-aware 3D generative adversarial networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 16123\u201316133 (2022)","DOI":"10.1109\/CVPR52688.2022.01565"},{"key":"21_CR8","doi-asserted-by":"crossref","unstructured":"Chan, E.R., Monteiro, M., Kellnhofer, P., Wu, J., Wetzstein, G.: pi-GAN: periodic implicit generative adversarial networks for 3d-aware image synthesis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
5799\u20135809 (2021)","DOI":"10.1109\/CVPR46437.2021.00574"},{"key":"21_CR9","doi-asserted-by":"crossref","unstructured":"Chen, X., et al.: GDNA: towards generative detailed neural avatars. arXiv (2022)","DOI":"10.1109\/CVPR52688.2022.01978"},{"key":"21_CR10","doi-asserted-by":"crossref","unstructured":"Chen, Y., et al.: UV volumes for real-time rendering of editable free-view human performance. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 16621\u201316631 (2022). https:\/\/api.semanticscholar.org\/CorpusID:247762811","DOI":"10.1109\/CVPR52729.2023.01595"},{"key":"21_CR11","unstructured":"Chen, Z., Hong, F., Mei, H., Wang, G., Yang, L., Liu, Z.: Primdiffusion: volumetric primitives diffusion for 3D human generation. In: Thirty-Seventh Conference on Neural Information Processing Systems (2023)"},{"key":"21_CR12","doi-asserted-by":"crossref","unstructured":"Dong, Z., Chen, X., Yang, J., Black, M.J., Hilliges, O., Geiger, A.: AG3D: learning to generate 3D avatars from 2D image collections. arXiv abs\/2305.02312 (2023). https:\/\/api.semanticscholar.org\/CorpusID:258461509","DOI":"10.1109\/ICCV51070.2023.01370"},{"key":"21_CR13","doi-asserted-by":"crossref","unstructured":"Fr\u00fchst\u00fcck, A., Singh, K.K., Shechtman, E., Mitra, N.J., Wonka, P., Lu, J.: InsetGAN for full-body image generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7723\u20137732 (2022)","DOI":"10.1109\/CVPR52688.2022.00757"},{"key":"21_CR14","doi-asserted-by":"crossref","unstructured":"Fu, J., et al.: StyleGAN-human: a data-centric odyssey of human generation. In: European Conference on Computer Vision (2022). 
https:\/\/api.semanticscholar.org\/CorpusID:248377018","DOI":"10.1007\/978-3-031-19787-1_1"},{"issue":"11","key":"21_CR15","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1145\/3422622","volume":"63","author":"I Goodfellow","year":"2020","unstructured":"Goodfellow, I., et al.: Generative adversarial networks. Commun. ACM 63(11), 139\u2013144 (2020)","journal-title":"Commun. ACM"},{"key":"21_CR16","doi-asserted-by":"crossref","unstructured":"Grigorev, A., et al.: Stylepeople: a generative model of fullbody human avatars. In: 2021 (CVPR), pp. 5147\u20135156 (2021)","DOI":"10.1109\/CVPR46437.2021.00511"},{"key":"21_CR17","unstructured":"Gu, J., et al.: Nerfdiff: single-image view synthesis with nerf-guided distillation from 3d-aware diffusion. In: International Conference on Machine Learning, pp. 11808\u201311826. PMLR (2023)"},{"key":"21_CR18","unstructured":"Gupta, A., Xiong, W., Nie, Y., Jones, I., O\u011fuz, B.: 3DGEN: triplane latent diffusion for textured mesh generation. arXiv preprint arXiv:2303.05371 (2023)"},{"key":"21_CR19","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local Nash equilibrium. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"21_CR20","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: Advances in Neural Information Processing Systems, vol. 33, pp. 6840\u20136851 (2020)"},{"key":"21_CR21","unstructured":"Hong, F., Chen, Z., Lan, Y., Pan, L., Liu, Z.: EVA3D: compositional 3D human generation from 2D image collections. arXiv abs\/2210.04888 (2022). https:\/\/api.semanticscholar.org\/CorpusID:252780848"},{"key":"21_CR22","doi-asserted-by":"crossref","unstructured":"Hong, F., Zhang, M., Pan, L., Cai, Z., Yang, L., Liu, Z.: Avatarclip: zero-shot text-driven generation and animation of 3D avatars. 
arXiv preprint arXiv:2205.08535 (2022)","DOI":"10.1145\/3528223.3530094"},{"key":"21_CR23","doi-asserted-by":"crossref","unstructured":"Hu, T., Hong, F., Liu, Z.: Surmo: surface-based 4D motion modeling for dynamic human rendering. In: Computer Vision and Pattern Recognition (CVPR) (2024)","DOI":"10.1109\/CVPR52733.2024.00626"},{"key":"21_CR24","doi-asserted-by":"crossref","unstructured":"Hu, T., Sarkar, K., Liu, L., Zwicker, M., Theobalt, C.: Egorenderer: rendering human avatars from egocentric camera images. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01426"},{"key":"21_CR25","doi-asserted-by":"publisher","unstructured":"Hu, T., et al.: HVTR++: image and pose driven human avatars using hybrid volumetric-textural rendering. IEEE Trans. Visual. Comput. Graph. 1\u201315 (2023). https:\/\/doi.org\/10.1109\/TVCG.2023.3297721","DOI":"10.1109\/TVCG.2023.3297721"},{"key":"21_CR26","doi-asserted-by":"crossref","unstructured":"Hu, T., Yu, T., Zheng, Z., Zhang, H., Liu, Y., Zwicker, M.: HVTR: hybrid volumetric-textural rendering for human avatars. In: 3DV (2022)","DOI":"10.1109\/3DV57658.2022.00032"},{"key":"21_CR27","doi-asserted-by":"crossref","unstructured":"Jain, A., Mildenhall, B., Barron, J.T., Abbeel, P., Poole, B.: Zero-shot text-guided object generation with dream fields (2022)","DOI":"10.1109\/CVPR52688.2022.00094"},{"issue":"4","key":"21_CR28","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3528223.3530104","volume":"41","author":"Y Jiang","year":"2022","unstructured":"Jiang, Y., Yang, S., Qiu, H., Wu, W., Loy, C.C., Liu, Z.: Text2human: text-driven controllable human image generation. ACM Trans. Graph. (TOG) 41(4), 1\u201311 (2022). https:\/\/doi.org\/10.1145\/3528223.3530104","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"21_CR29","unstructured":"Jun, H., Nichol, A.: Shap-e: generating conditional 3D implicit functions. 
arXiv preprint arXiv:2305.02463 (2023)"},{"key":"21_CR30","doi-asserted-by":"crossref","unstructured":"Kajiya, J.T., Herzen, B.V.: Ray tracing volume densities. In: Proceedings of the 11th Annual Conference on Computer Graphics and Interactive Techniques (1984)","DOI":"10.1145\/800031.808594"},{"key":"21_CR31","doi-asserted-by":"crossref","unstructured":"Karras, J., Holynski, A., Wang, T.C., Kemelmacher-Shlizerman, I.: Dreampose: fashion image-to-video synthesis via stable diffusion (2023)","DOI":"10.1109\/ICCV51070.2023.02073"},{"key":"21_CR32","unstructured":"Karras, T., et al.: Alias-free generative adversarial networks. In: Proceedings of NeurIPS (2021)"},{"key":"21_CR33","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: CVPR, pp. 4401\u20134410 (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"21_CR34","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4401\u20134410 (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"21_CR35","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8110\u20138119 (2020)","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"21_CR36","doi-asserted-by":"crossref","unstructured":"Kerbl, B., Kopanas, G., Leimk\u00fchler, T., Drettakis, G.: 3D gaussian splatting for real-time radiance field rendering. ACM Trans. Graph. 42(4) (2023). https:\/\/repo-sam.inria.fr\/fungraph\/3d-gaussian-splatting\/","DOI":"10.1145\/3592433"},{"key":"21_CR37","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. 
In: ICLR (2015)"},{"issue":"4","key":"21_CR38","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3450626.3459884","volume":"40","author":"KM Lewis","year":"2021","unstructured":"Lewis, K.M., Varadharajan, S., Kemelmacher-Shlizerman, I.: TryonGAN: body-aware try-on via layered interpolation. ACM Trans. Graph. (TOG) 40(4), 1\u201310 (2021)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"21_CR39","unstructured":"Liu, X., et al.: Hyperhuman: hyper-realistic human generation with latent structural diffusion. arXiv preprint arXiv:2310.08579 (2023)"},{"key":"21_CR40","doi-asserted-by":"crossref","unstructured":"Liu, Z., Luo, P., Qiu, S., Wang, X., Tang, X.: Deepfashion: powering robust clothes recognition and retrieval with rich annotations. In: CVPR, pp. 1096\u20131104 (2016)","DOI":"10.1109\/CVPR.2016.124"},{"key":"21_CR41","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3450626.3459863","volume":"40","author":"S Lombardi","year":"2021","unstructured":"Lombardi, S., Simon, T., Schwartz, G., Zollhoefer, M., Sheikh, Y., Saragih, J.M.: Mixture of volumetric primitives for efficient neural rendering. ACM Trans. Graph. (TOG) 40, 1\u201313 (2021)","journal-title":"ACM Trans. Graph. (TOG)"},{"issue":"248","key":"21_CR42","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2816795.2818013","volume":"34","author":"M Loper","year":"2015","unstructured":"Loper, M., Mahmood, N., Romero, J., Pons-Moll, G., Black, M.J.: SMPL: a skinned multi-person linear model. ACM Trans. Graph. 34(248), 1\u201316 (2015)","journal-title":"ACM Trans. Graph."},{"key":"21_CR43","doi-asserted-by":"crossref","unstructured":"Luo, S., Hu, W.: Diffusion probabilistic models for 3d point cloud generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
2837\u20132845 (2021)","DOI":"10.1109\/CVPR46437.2021.00286"},{"key":"21_CR44","doi-asserted-by":"crossref","unstructured":"Ma, Q., Saito, S., Yang, J., Tang, S., Black, M.J.: Scale: modeling clothed humans with a surface codec of articulated local elements. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01582"},{"key":"21_CR45","doi-asserted-by":"crossref","unstructured":"Ma, Q., Yang, J., Tang, S., Black, M.J.: The power of points for modeling humans in clothing. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01079"},{"key":"21_CR46","doi-asserted-by":"crossref","unstructured":"M\u00fcller, N., Siddiqui, Y., Porzi, L., Bul\u00f2, S.R., Kontschieder, P., Nie\u00dfner, M.: Diffrf: rendering-guided 3D radiance field diffusion. arXiv preprint arXiv:2212.01206 (2022)","DOI":"10.1109\/CVPR52729.2023.00421"},{"key":"21_CR47","unstructured":"Nichol, A., Jun, H., Dhariwal, P., Mishkin, P., Chen, M.: Point-e: a system for generating 3D point clouds from complex prompts. arXiv preprint arXiv:2212.08751 (2022)"},{"key":"21_CR48","unstructured":"Nichol, A.Q., Dhariwal, P.: Improved denoising diffusion probabilistic models. In: International Conference on Machine Learning, pp. 8162\u20138171. PMLR (2021)"},{"key":"21_CR49","doi-asserted-by":"crossref","unstructured":"Niemeyer, M., Geiger, A.: Giraffe: representing scenes as compositional generative neural feature fields. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11453\u201311464 (2021)","DOI":"10.1109\/CVPR46437.2021.01129"},{"key":"21_CR50","doi-asserted-by":"crossref","unstructured":"Noguchi, A., Sun, X., Lin, S., Harada, T.: Unsupervised learning of efficient geometry-aware neural articulated representations. 
arXiv preprint arXiv:2204.08839 (2022)","DOI":"10.1007\/978-3-031-19790-1_36"},{"key":"21_CR51","doi-asserted-by":"publisher","unstructured":"Noguchi, A., Sun, X., Lin, S., Harada, T.: Unsupervised learning of efficient geometry-aware neural articulated representations. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13677, pp. 597\u2013614. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19790-1_36, https:\/\/api.semanticscholar.org\/CorpusID:248239659","DOI":"10.1007\/978-3-031-19790-1_36"},{"key":"21_CR52","unstructured":"Ntavelis, E., Siarohin, A., Olszewski, K., Wang, C., Gool, L.V., Tulyakov, S.: Autodecoding latent 3D diffusion models (2023)"},{"key":"21_CR53","doi-asserted-by":"crossref","unstructured":"Or-El, R., Luo, X., Shan, M., Shechtman, E., Park, J.J., Kemelmacher-Shlizerman, I.: Stylesdf: high-resolution 3d-consistent image and geometry generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13503\u201313513 (2022)","DOI":"10.1109\/CVPR52688.2022.01314"},{"key":"21_CR54","unstructured":"Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: text-to-3D using 2D diffusion. arXiv (2022)"},{"key":"21_CR55","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"21_CR56","doi-asserted-by":"crossref","unstructured":"Remelli, E., et al.: Drivable volumetric avatars using texel-aligned features. In: ACM SIGGRAPH (2022)","DOI":"10.1145\/3528233.3530740"},{"key":"21_CR57","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"21_CR58","unstructured":"Sarkar, K., Golyanik, V., Liu, L., Theobalt, C.: Style and pose control for image synthesis of humans from a single monocular view. arXiv preprint arXiv:2102.11263 (2021)"},{"key":"21_CR59","doi-asserted-by":"crossref","unstructured":"Sarkar, K., Liu, L., Golyanik, V., Theobalt, C.: HumanGAN: a generative model of humans images. arXiv preprint arXiv:2103.06902 (2021)","DOI":"10.1109\/3DV53792.2021.00036"},{"key":"21_CR60","unstructured":"Shelhamer, E., Long, J., Darrell, T.: Fully convolutional networks for semantic segmentation. In: 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3431\u20133440 (2014). https:\/\/api.semanticscholar.org\/CorpusID:1629541"},{"key":"21_CR61","doi-asserted-by":"crossref","unstructured":"Shue, J.R., Chan, E.R., Po, R., Ankner, Z., Wu, J., Wetzstein, G.: 3D neural field generation using triplane diffusion. arXiv preprint arXiv:2211.16677 (2022)","DOI":"10.1109\/CVPR52729.2023.02000"},{"key":"21_CR62","unstructured":"Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502 (2020)"},{"key":"21_CR63","doi-asserted-by":"crossref","unstructured":"Sun, J., et al.: Next3D: generative neural texture rasterization for 3D-aware head avatars. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 20991\u201321002 (2022). https:\/\/api.semanticscholar.org\/CorpusID:253735045","DOI":"10.1109\/CVPR52729.2023.02011"},{"key":"21_CR64","unstructured":"Renderpeople (2018). https:\/\/renderpeople.com\/3d-people\/"},{"key":"21_CR65","unstructured":"Wang, T., et al.: Disco: disentangled control for referring human dance generation in real world. arXiv preprint arXiv:2307.00040 (2023)"},{"key":"21_CR66","doi-asserted-by":"crossref","unstructured":"Wang, T., et\u00a0al.: Rodin: a generative model for sculpting 3D digital avatars using diffusion. 
arXiv preprint arXiv:2212.06135 (2022)","DOI":"10.1109\/CVPR52729.2023.00443"},{"key":"21_CR67","doi-asserted-by":"crossref","unstructured":"Yu, T., Zheng, Z., Guo, K., Liu, P., Dai, Q., Liu, Y.: Function4D: real-time human volumetric capture from very sparse consumer RGBD sensors. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR2021) (2021)","DOI":"10.1109\/CVPR46437.2021.00569"},{"key":"21_CR68","unstructured":"Zablotskaia, P., Siarohin, A., Zhao, B., Sigal, L.: Dwnet: dense warp-based network for pose-guided human video generation. arXiv preprint arXiv:1910.09139 (2019)"},{"key":"21_CR69","doi-asserted-by":"crossref","unstructured":"Zeng, W., Ouyang, W., Luo, P., Liu, W., Wang, X.: 3D human mesh regression with dense correspondence. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7052\u20137061 (2020). https:\/\/api.semanticscholar.org\/CorpusID:219558352","DOI":"10.1109\/CVPR42600.2020.00708"},{"key":"21_CR70","unstructured":"Zeng, X., et al.: Lion: latent point diffusion models for 3D shape generation. arXiv preprint arXiv:2210.06978 (2022)"},{"key":"21_CR71","doi-asserted-by":"crossref","unstructured":"Zhang, J., et al.: Avatargen: a 3D generative model for animatable human avatars. arXiv preprint arXiv:2208.00561 (2022)","DOI":"10.1007\/978-3-031-25066-8_39"},{"key":"21_CR72","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"21_CR73","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: CVPR, pp. 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"21_CR74","doi-asserted-by":"crossref","unstructured":"Zheng, Z., Huang, H., Yu, T., Zhang, H., Guo, Y., Liu, Y.: Structured local radiance fields for human avatar modeling. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.01543"},{"key":"21_CR75","doi-asserted-by":"crossref","unstructured":"Zhou, L., Du, Y., Wu, J.: 3D shape generation and completion through point-voxel diffusion. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5826\u20135835 (2021)","DOI":"10.1109\/ICCV48922.2021.00577"},{"key":"21_CR76","doi-asserted-by":"crossref","unstructured":"Zwicker, M., Pfister, H., van Baar, J., Gross, M.H.: Ewa splatting. IEEE Trans. Vis. Comput. Graph. 8, 223\u2013238 (2002). https:\/\/api.semanticscholar.org\/CorpusID:9389692","DOI":"10.1109\/TVCG.2002.1021576"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72983-6_21","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T10:02:20Z","timestamp":1730109740000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72983-6_21"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,29]]},"ISBN":["9783031729829","9783031729836"],"references-count":76,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72983-6_21","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,10,29]]},"assertion":[{"value":"29 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer 
Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}