{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:44:29Z","timestamp":1777657469280,"version":"3.51.4"},"publisher-location":"Cham","reference-count":64,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031732225","type":"print"},{"value":"9783031732232","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,8]],"date-time":"2024-11-08T00:00:00Z","timestamp":1731024000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,8]],"date-time":"2024-11-08T00:00:00Z","timestamp":1731024000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73223-2_8","type":"book-chapter","created":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T18:50:03Z","timestamp":1731005403000},"page":"122-139","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":16,"title":["VividDreamer: Invariant Score Distillation for\u00a0Hyper-Realistic Text-to-3D Generation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0002-0851-5546","authenticated-orcid":false,"given":"Wenjie","family":"Zhuo","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4131-1222","authenticated-orcid":false,"given":"Fan","family":"Ma","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9572-2345","authenticated-orcid":false,"given":"Hehe","family":"Fan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0512-880X","authenticated-orcid":false,"given":"Yi","family":"Yang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,8]]},"reference":[{"key":"8_CR1","unstructured":"Armandpour, M., Zheng, H., Sadeghian, A., Sadeghian, A., Zhou, M.: Re-imagine the negative prompt algorithm: Transform 2d diffusion into 3d, alleviate janus problem and beyond. arXiv preprint arXiv:2304.04968 (2023)"},{"key":"8_CR2","unstructured":"Chang, A.X., et al.: ShapeNet: An Information-Rich 3D Model Repository. Tech. Rep. arXiv:1512.03012 [cs.GR], Stanford University \u2014 Princeton University \u2014 Toyota Technological Institute at Chicago (2015)"},{"key":"8_CR3","unstructured":"Chen, G., Wang, W.: A survey on 3d gaussian splatting. arXiv preprint arXiv:2401.03890 (2024)"},{"key":"8_CR4","doi-asserted-by":"crossref","unstructured":"Chen, R., Chen, Y., Jiao, N., Jia, K.: Fantasia3d: Disentangling geometry and appearance for high-quality text-to-3d content creation. arXiv preprint arXiv:2303.13873 (2023)","DOI":"10.1109\/ICCV51070.2023.02033"},{"key":"8_CR5","doi-asserted-by":"crossref","unstructured":"Chen, Z., Wang, F., Liu, H.: Text-to-3d using gaussian splatting (2023)","DOI":"10.1109\/CVPR52733.2024.02022"},{"key":"8_CR6","doi-asserted-by":"crossref","unstructured":"Deitke, M., et\u00a0al.: Objaverse-xl: A universe of 10m+ 3d objects. Adv. Neural Inform. Process. Syst. 36 (2024)","DOI":"10.1109\/CVPR52729.2023.01263"},{"key":"8_CR7","doi-asserted-by":"crossref","unstructured":"Deitke, M., et al.: Objaverse: a universe of annotated 3d objects. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 13142\u201313153 (2023)","DOI":"10.1109\/CVPR52729.2023.01263"},{"key":"8_CR8","first-page":"8780","volume":"34","author":"P Dhariwal","year":"2021","unstructured":"Dhariwal, P., Nichol, A.: Diffusion models beat gans on image synthesis. Adv. Neural. Inf. Process. Syst. 34, 8780\u20138794 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"8_CR9","unstructured":"Gao, J., Fu, Y., Wang, Y., Qian, X., Feng, J., Fu, Y.: Mind-3d: Reconstruct high-quality 3d objects in human brain (2024). https:\/\/arxiv.org\/abs\/2312.07485"},{"key":"8_CR10","doi-asserted-by":"crossref","unstructured":"Gao, J., et al.: Coarse-to-fine amodal segmentation with shape prior (2023). https:\/\/arxiv.org\/abs\/2308.16825","DOI":"10.1109\/ICCV51070.2023.00122"},{"key":"8_CR11","unstructured":"Guo, Y.C., et al.: threestudio: A unified framework for 3d content generation. https:\/\/github.com\/threestudio-project\/threestudio (2023)"},{"key":"8_CR12","doi-asserted-by":"crossref","unstructured":"Hessel, J., Holtzman, A., Forbes, M., Bras, R.L., Choi, Y.: Clipscore: A reference-free evaluation metric for image captioning (2022)","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"8_CR13","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Adv. Neural. Inf. Process. Syst. 33, 6840\u20136851 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"8_CR14","unstructured":"Ho, J., Salimans, T.: Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598 (2022)"},{"key":"8_CR15","unstructured":"Hong, Y., et al.: Lrm: Large reconstruction model for single image to 3d. arXiv preprint arXiv:2311.04400 (2023)"},{"key":"8_CR16","unstructured":"Hu, E.J., et al.: Lora: Low-rank adaptation of large language models (2021)"},{"key":"8_CR17","unstructured":"Huang, Y., Wang, J., Shi, Y., Qi, X., Zha, Z.J., Zhang, L.: Dreamtime: An improved optimization strategy for text-to-3d content creation. arXiv preprint arXiv:2306.12422 (2023)"},{"key":"8_CR18","doi-asserted-by":"crossref","unstructured":"Jain, A., Mildenhall, B., Barron, J.T., Abbeel, P., Poole, B.: Zero-shot text-guided object generation with dream fields (2022)","DOI":"10.1109\/CVPR52688.2022.00094"},{"key":"8_CR19","unstructured":"Jun, H., Nichol, A.: Shap-e: Generating conditional 3d implicit functions. arXiv preprint arXiv:2305.02463 (2023)"},{"key":"8_CR20","first-page":"26565","volume":"35","author":"T Karras","year":"2022","unstructured":"Karras, T., Aittala, M., Aila, T., Laine, S.: Elucidating the design space of diffusion-based generative models. Adv. Neural. Inf. Process. Syst. 35, 26565\u201326577 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"8_CR21","unstructured":"Katzir, O., Patashnik, O., Cohen-Or, D., Lischinski, D.: Noise-free score distillation. arXiv preprint arXiv:2310.17590 (2023)"},{"key":"8_CR22","doi-asserted-by":"crossref","unstructured":"Kerbl, B., Kopanas, G., Leimk\u00fchler, T., Drettakis, G.: 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph. 42(4) (2023). https:\/\/repo-sam.inria.fr\/fungraph\/3d-gaussian-splatting\/","DOI":"10.1145\/3592433"},{"key":"8_CR23","unstructured":"Li, J., et al.: Instant3d: Fast text-to-3d with sparse-view generation and large reconstruction model (2023)"},{"key":"8_CR24","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation (2022)"},{"key":"8_CR25","doi-asserted-by":"crossref","unstructured":"Liang, Y., Yang, X., Lin, J., Li, H., Xu, X., Chen, Y.: Luciddreamer: Towards high-fidelity text-to-3d generation via interval score matching (2023)","DOI":"10.1109\/CVPR52733.2024.00623"},{"key":"8_CR26","doi-asserted-by":"crossref","unstructured":"Lin, C.H., et al.: Magic3d: high-resolution text-to-3d content creation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 300\u2013309 (2023)","DOI":"10.1109\/CVPR52729.2023.00037"},{"key":"8_CR27","unstructured":"Liu, M., et al.: One-2-3-45: any single image to 3d mesh in 45 seconds without per-shape optimization. Adv. Neural Inform. Process. Syst. 36 (2024)"},{"key":"8_CR28","doi-asserted-by":"crossref","unstructured":"Liu, R., Wu, R., Hoorick, B.V., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: Zero-shot one image to 3d object (2023)","DOI":"10.1109\/ICCV51070.2023.00853"},{"key":"8_CR29","unstructured":"Liu, X., Zhang, X., Ma, J., Peng, J., et\u00a0al.: Instaflow: one step is enough for high-quality diffusion-based text-to-image generation. In: The Twelfth International Conference on Learning Representations (2023)"},{"key":"8_CR30","unstructured":"Liu, Y., et al.: Syncdreamer: Generating multiview-consistent images from a single-view image. arXiv preprint arXiv:2309.03453 (2023)"},{"key":"8_CR31","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization (2019)"},{"key":"8_CR32","first-page":"5775","volume":"35","author":"C Lu","year":"2022","unstructured":"Lu, C., Zhou, Y., Bao, F., Chen, J., Li, C., Zhu, J.: Dpm-solver: a fast ode solver for diffusion probabilistic model sampling in around 10 steps. Adv. Neural. Inf. Process. Syst. 35, 5775\u20135787 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"8_CR33","unstructured":"Luo, T., Rockwell, C., Lee, H., Johnson, J.: Scalable 3d captioning with pretrained models (2023)"},{"key":"8_CR34","doi-asserted-by":"crossref","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: Representing scenes as neural radiance fields for view synthesis (2020)","DOI":"10.1007\/978-3-030-58452-8_24"},{"key":"8_CR35","doi-asserted-by":"publisher","unstructured":"M\u00fcller, T., Evans, A., Schied, C., Keller, A.: Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph. 41(4), 102:1\u2013102:15 (2022). https:\/\/doi.org\/10.1145\/3528223.3530127","DOI":"10.1145\/3528223.3530127"},{"key":"8_CR36","unstructured":"Nichol, A., et al.: Glide: Towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741 (2021)"},{"key":"8_CR37","unstructured":"Nichol, A., Jun, H., Dhariwal, P., Mishkin, P., Chen, M.: Point-e: A system for generating 3d point clouds from complex prompts. arXiv preprint arXiv:2212.08751 (2022)"},{"key":"8_CR38","unstructured":"van\u00a0den Oord, A., et al.: Parallel wavenet: Fast high-fidelity speech synthesis (2017)"},{"key":"8_CR39","unstructured":"Park, D.H., Azadi, S., Liu, X., Darrell, T., Rohrbach, A.: Benchmark for compositional text-to-image synthesis. In: Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1) (2021)"},{"key":"8_CR40","unstructured":"Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022)"},{"key":"8_CR41","unstructured":"Qian, G., et\u00a0al.: Magic123: One image to high-quality 3d object generation using both 2d and 3d diffusion priors. arXiv preprint arXiv:2306.17843 (2023)"},{"key":"8_CR42","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision (2021)"},{"key":"8_CR43","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"8_CR44","first-page":"36479","volume":"35","author":"C Saharia","year":"2022","unstructured":"Saharia, C., et al.: Photorealistic text-to-image diffusion models with deep language understanding. Adv. Neural. Inf. Process. Syst. 35, 36479\u201336494 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"8_CR45","unstructured":"Salimans, T., Ho, J.: Progressive distillation for fast sampling of diffusion models. arXiv preprint arXiv:2202.00512 (2022)"},{"key":"8_CR46","doi-asserted-by":"crossref","unstructured":"Sanghi, A., et al.: Clip-forge: Towards zero-shot text-to-shape generation (2022)","DOI":"10.1109\/CVPR52688.2022.01805"},{"key":"8_CR47","unstructured":"Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502 (2020)"},{"key":"8_CR48","unstructured":"Song, Y., Dhariwal, P., Chen, M., Sutskever, I.: Consistency models (2023)"},{"key":"8_CR49","unstructured":"Song, Y., Sohl-Dickstein, J., Kingma, D.P., Kumar, A., Ermon, S., Poole, B.: Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456 (2020)"},{"key":"8_CR50","unstructured":"Tang, J., Ren, J., Zhou, H., Liu, Z., Zeng, G.: Dreamgaussian: Generative gaussian splatting for efficient 3d content creation. arXiv preprint arXiv:2309.16653 (2023)"},{"key":"8_CR51","doi-asserted-by":"crossref","unstructured":"Wang, H., Du, X., Li, J., Yeh, R.A., Shakhnarovich, G.: Score jacobian chaining: lifting pretrained 2d diffusion models for 3d generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12619\u201312629 (2023)","DOI":"10.1109\/CVPR52729.2023.01214"},{"key":"8_CR52","doi-asserted-by":"crossref","unstructured":"Wang, X., et al.: Animatabledreamer: Text-guided non-rigid 3d model generation and reconstruction with canonical score distillation. arXiv preprint arXiv:2312.03795 (2023)","DOI":"10.1007\/978-3-031-72698-9_19"},{"key":"8_CR53","unstructured":"Wang, Z., et al.: Prolificdreamer: High-fidelity and diverse text-to-3d generation with variational score distillation. Advances in Neural Information Processing Systems 36 (2024)"},{"key":"8_CR54","doi-asserted-by":"crossref","unstructured":"Wu, Z., Zhou, P., Yi, X., Yuan, X., Zhang, H.: Consistent3d: Towards consistent high-fidelity text-to-3d generation with deterministic sampling prior (2024)","DOI":"10.1109\/CVPR52733.2024.00944"},{"key":"8_CR55","unstructured":"Xu, Y., Yang, Z., Yang, Y.: Seeavatar: Photorealistic text-to-3d avatar generation with constrained geometry and appearance. arXiv preprint arXiv:2312.08889 (2023)"},{"key":"8_CR56","unstructured":"Yang, Z., Chen, G., Li, X., Wang, W., Yang, Y.: Doraemongpt: Toward understanding dynamic scenes with large language models (exemplified as a video agent) (2024)"},{"key":"8_CR57","unstructured":"Ye, J., et al.: Dreamreward: Text-to-3d generation with human preference. arXiv preprint arXiv:2403.14613 (2024)"},{"key":"8_CR58","unstructured":"Yi, T., et al.: Gaussiandreamer: Fast generation from text to 3d gaussian splatting with point cloud priors. arXiv preprint arXiv:2310.08529 (2023)"},{"key":"8_CR59","unstructured":"Yu, X., Guo, Y.C., Li, Y., Liang, D., Zhang, S.H., Qi, X.: Text-to-3d with classifier score distillation. arXiv preprint arXiv:2310.19415 (2023)"},{"key":"8_CR60","unstructured":"Zeng, X., et al.: Lion: Latent point diffusion models for 3d shape generation (2022)"},{"key":"8_CR61","doi-asserted-by":"crossref","unstructured":"Zhou, D., Li, Y., Ma, F., Zhang, X., Yang, Y.: Migc: multi-instance generation controller for text-to-image synthesis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6818\u20136828 (2024)","DOI":"10.1109\/CVPR52733.2024.00651"},{"key":"8_CR62","doi-asserted-by":"crossref","unstructured":"Zhou, Z., Ma, F., Fan, H., Yang, Y.: Headstudio: Text to animatable head avatars with 3d gaussian splatting. arXiv preprint arXiv:2402.06149 (2024)","DOI":"10.1007\/978-3-031-73411-3_9"},{"key":"8_CR63","unstructured":"Zhu, J., Zhuang, P.: Hifa: High-fidelity text-to-3d with advanced diffusion guidance. arXiv preprint arXiv:2305.18766 (2023)"},{"key":"8_CR64","doi-asserted-by":"crossref","unstructured":"Zhuo, W., Sun, Y., Wang, X., Zhu, L., Yang, Y.: Whitenedcse: whitening-based contrastive learning of sentence embeddings. In: Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 12135\u201312148 (2023)","DOI":"10.18653\/v1\/2023.acl-long.677"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73223-2_8","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,30]],"date-time":"2024-11-30T23:22:29Z","timestamp":1733008949000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73223-2_8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,8]]},"ISBN":["9783031732225","9783031732232"],"references-count":64,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73223-2_8","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,8]]},"assertion":[{"value":"8 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}