{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:20:05Z","timestamp":1775578805631,"version":"3.50.1"},"publisher-location":"Cham","reference-count":55,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031732010","type":"print"},{"value":"9783031732027","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T00:00:00Z","timestamp":1732147200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T00:00:00Z","timestamp":1732147200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73202-7_10","type":"book-chapter","created":{"date-parts":[[2024,11,20]],"date-time":"2024-11-20T14:19:27Z","timestamp":1732112367000},"page":"162-178","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["DreamMesh: Jointly Manipulating and Texturing Triangle Meshes for Text-to-3D Generation"],"prefix":"10.1007","author":[{"given":"Haibo","family":"Yang","sequence":"first","affiliation":[]},{"given":"Yang","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Yingwei","family":"Pan","sequence":"additional","affiliation":[]},{"given":"Ting","family":"Yao","sequence":"additional","affiliation":[]},{"given":"Zhineng","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Zuxuan","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Yu-Gang","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Tao","family":"Mei","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,21]]},"reference":[{"key":"10_CR1","doi-asserted-by":"crossref","unstructured":"Aigerman, N., Gupta, K., Kim, V.G., Chaudhuri, S., Saito, J., Groueix, T.: Neural jacobian fields: learning intrinsic mappings of arbitrary meshes. In: SIGGRAPH (2022)","DOI":"10.1145\/3528223.3530141"},{"key":"10_CR2","doi-asserted-by":"publisher","unstructured":"Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: TensoRF: tensorial radiance fields. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) Computer Vision \u2013 ECCV 2022. ECCV 2022. LNCS, vol. 13692, pp. 333\u2013350. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19824-3_20","DOI":"10.1007\/978-3-031-19824-3_20"},{"key":"10_CR3","doi-asserted-by":"crossref","unstructured":"Chen, D.Z., Siddiqui, Y., Lee, H.Y., Tulyakov, S., Nie\u00dfner, M.: Text2tex: text-driven texture synthesis via diffusion models. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01701"},{"key":"10_CR4","doi-asserted-by":"crossref","unstructured":"Chen, R., Chen, Y., Jiao, N., Jia, K.: Fantasia3d: disentangling geometry and appearance for high-quality text-to-3d content creation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.02033"},{"key":"10_CR5","doi-asserted-by":"crossref","unstructured":"Chen, Y., Chen, J., Pan, Y., Tian, X., Mei, T.: 3D creation at your fingertips: from text or image to 3d assets. In: ACM MM (2023)","DOI":"10.1145\/3581783.3612678"},{"key":"10_CR6","doi-asserted-by":"crossref","unstructured":"Chen, Y., Pan, Y., Li, Y., Yao, T., Mei, T.: Control3d: towards controllable text-to-3d generation. In: ACM MM (2023)","DOI":"10.1145\/3581783.3612489"},{"key":"10_CR7","doi-asserted-by":"crossref","unstructured":"Chen, Y., Pan, Y., Yang, H., Yao, T., Mei, T.: VP3D: unleashing 2d visual prompt for text-to-3d generation. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00468"},{"key":"10_CR8","doi-asserted-by":"crossref","unstructured":"Chen, Y., Pan, Y., Yao, T., Tian, X., Mei, T.: Animating your life: real-time video-to-animation translation. In: ACM MM (2019)","DOI":"10.1145\/3343031.3350593"},{"key":"10_CR9","doi-asserted-by":"crossref","unstructured":"Chen, Y., Pan, Y., Yao, T., Tian, X., Mei, T.: Mocycle-GAN: unpaired video-to-video translation. In: ACM MM (2019)","DOI":"10.1145\/3343031.3350937"},{"key":"10_CR10","doi-asserted-by":"crossref","unstructured":"Cheng, Y.C., Lee, H.Y., Tuyakov, S., Schwing, A., Gui, L.: SDFusion: multimodal 3d shape completion, reconstruction, and generation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00433"},{"key":"10_CR11","unstructured":"Fuji\u00a0Tsang, C., et al.: Kaolin: a pytorch library for accelerating 3d deep learning research (2022). https:\/\/github.com\/NVIDIAGameWorks\/kaolin"},{"key":"10_CR12","doi-asserted-by":"crossref","unstructured":"Gao, C., Jiang, B., Li, X., Zhang, Y., Yu, Q.: Genesistex: adapting image denoising diffusion to texture space. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00442"},{"key":"10_CR13","doi-asserted-by":"crossref","unstructured":"Gao, W., Aigerman, N., Thibault, G., Kim, V., Hanocka, R.: Textdeformer: geometry manipulation using text guidance. In: SIGGRAPH (2023)","DOI":"10.1145\/3588432.3591552"},{"key":"10_CR14","unstructured":"Hasselgren, J., Munkberg, J., Lehtinen, J., Aittala, M., Laine, S.: Appearance-driven automatic 3d model simplification. In: EGSR (2021)"},{"key":"10_CR15","unstructured":"He, Y., et al.: T$$^3$$bench: benchmarking current progress in text-to-3d generation. arxiv preprint arXiv:2310.02977 (2023)"},{"key":"10_CR16","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: NeurIPS (2020)"},{"key":"10_CR17","unstructured":"Ho, J., Salimans, T.: Classifier-free diffusion guidance. In: NeurIPS Workshop (2022)"},{"key":"10_CR18","unstructured":"Ho, J., Salimans, T., Gritsenko, A., Chan, W., Norouzi, M., Fleet, D.J.: Video diffusion models. arXiv preprint arXiv:2204.03458 (2022)"},{"key":"10_CR19","doi-asserted-by":"crossref","unstructured":"Huang, Q., Huang, X., Sun, B., Zhang, Z., Jiang, J., Bajaj, C.: Arapreg: an as-rigid-as possible regularization loss for learning deformable shape generators. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.00576"},{"key":"10_CR20","unstructured":"Jun, H., Nichol, A.: Shap-E: generating conditional 3d implicit functions. arXiv preprint arXiv:2305.02463 (2023)"},{"key":"10_CR21","unstructured":"Katzir, O., Patashnik, O., Cohen-Or, D., Lischinski, D.: Noise-free score distillation. In: ICLR (2024)"},{"key":"10_CR22","unstructured":"Khalid, N.M., Xie, T., Belilovsky, E., Tiberiu, P.: Clip-mesh: generating textured meshes from text using pretrained image-text models. In: SIGGRAPH (2022)"},{"key":"10_CR23","doi-asserted-by":"crossref","unstructured":"Laine, S., Hellsten, J., Karras, T., Seol, Y., Lehtinen, J., Aila, T.: Modular primitives for high-performance differentiable rendering. ACM Trans. Graph. (2020)","DOI":"10.1145\/3414685.3417861"},{"key":"10_CR24","doi-asserted-by":"crossref","unstructured":"Li, M., Duan, Y., Zhou, J., Lu, J.: Diffusion-SDF: text-to-shape via voxelized diffusion. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01216"},{"key":"10_CR25","doi-asserted-by":"crossref","unstructured":"Lin, C.H., et al.: Magic3d: high-resolution text-to-3d content creation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00037"},{"key":"10_CR26","doi-asserted-by":"crossref","unstructured":"Metzer, G., Richardson, E., Patashnik, O., Giryes, R., Cohen-Or, D.: Latent-nerf for shape-guided generation of 3d shapes and textures. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01218"},{"key":"10_CR27","doi-asserted-by":"crossref","unstructured":"Michel, O., Bar-On, R., Liu, R., Benaim, S., Hanocka, R.: Text2mesh: text-driven neural stylization for meshes. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01313"},{"key":"10_CR28","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1007\/978-3-030-58452-8_24","volume-title":"Computer Vision \u2013 ECCV 2020","author":"B Mildenhall","year":"2020","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 405\u2013421. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_24"},{"key":"10_CR29","unstructured":"Nichol, A., et al.: Glide: towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741 (2021)"},{"key":"10_CR30","unstructured":"Nichol, A., Jun, H., Dhariwal, P., Mishkin, P., Chen, M.: Point-e: a system for generating 3d point clouds from complex prompts. arXiv preprint arXiv:2212.08751 (2022)"},{"key":"10_CR31","doi-asserted-by":"crossref","unstructured":"Pan, Y., Qiu, Z., Yao, T., Li, H., Mei, T.: To create what you tell: generating videos from captions. In: ACM Multimedia (2017)","DOI":"10.1145\/3123266.3127905"},{"key":"10_CR32","unstructured":"Podell, D., et al.: SDXL: improving latent diffusion models for high-resolution image synthesis. In: ICLR (2024)"},{"key":"10_CR33","unstructured":"Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: text-to-3d using 2d diffusion. In: ICLR (2023)"},{"key":"10_CR34","doi-asserted-by":"crossref","unstructured":"Qian, Y., et al.: Boosting diffusion models with moving average sampling in frequency domain. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00851"},{"key":"10_CR35","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: ICML (2021)"},{"key":"10_CR36","unstructured":"Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125 (2022)"},{"key":"10_CR37","doi-asserted-by":"crossref","unstructured":"Richardson, E., Metzer, G., Alaluf, Y., Giryes, R., Cohen-Or, D.: Texture: text-guided texturing of 3d shapes. In: SIGGRAPH (2023)","DOI":"10.1145\/3588432.3591503"},{"key":"10_CR38","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"10_CR39","unstructured":"Saharia, C., et al.: Photorealistic text-to-image diffusion models with deep language understanding. In: NeurIPS (2022)"},{"key":"10_CR40","unstructured":"Shen, T., Gao, J., Yin, K., Liu, M.Y., Fidler, S.: Deep marching tetrahedra: a hybrid representation for high-resolution 3d shape synthesis. In: NeurIPS (2021)"},{"key":"10_CR41","unstructured":"Shi, Y., Wang, P., Ye, J., Long, M., Li, K., Yang, X.: Mvdream: multi-view diffusion for 3d generation. In: ICLR (2024)"},{"key":"10_CR42","unstructured":"Sorkine, O., Alexa, M.: As-rigid-as-possible surface modeling. In: SGP. Citeseer (2007)"},{"key":"10_CR43","doi-asserted-by":"crossref","unstructured":"Sorkine, O., Cohen-Or, D., Lipman, Y., Alexa, M., R\u00f6ssl, C., Seidel, H.P.: Laplacian surface editing. In: SGP (2004)","DOI":"10.1145\/1057432.1057456"},{"key":"10_CR44","doi-asserted-by":"crossref","unstructured":"Sun, C., Sun, M., Chen, H.: Direct voxel grid optimization: super-fast convergence for radiance fields reconstruction. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00538"},{"key":"10_CR45","unstructured":"Tang, J., Markhasin, L., Wang, B., Thies, J., Nie\u00dfner, M.: Neural shape deformation priors. In: NeurIPS (2022)"},{"key":"10_CR46","unstructured":"Tang, J., Ren, J., Zhou, H., Liu, Z., Zeng, G.: Dreamgaussian: generative gaussian splatting for efficient 3d content creation. In: ICLR (2024)"},{"key":"10_CR47","doi-asserted-by":"crossref","unstructured":"Wang, H., Du, X., Li, J., Yeh, R.A., Shakhnarovich, G.: Score jacobian chaining: lifting pretrained 2d diffusion models for 3d generation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01214"},{"key":"10_CR48","unstructured":"Wang, Z., et al.: Prolificdreamer: high-fidelity and diverse text-to-3d generation with variational score distillation. In: NeurIPS (2023)"},{"key":"10_CR49","doi-asserted-by":"crossref","unstructured":"Yang, H., Chen, Y., Pan, Y., Yao, T., Chen, Z., Mei, T.: 3Dstyle-diffusion: pursuing fine-grained text-driven 3d stylization with 2d diffusion models. In: ACM MM (2023)","DOI":"10.1145\/3581783.3612363"},{"key":"10_CR50","doi-asserted-by":"crossref","unstructured":"Yang, R., Srivastava, P., Mandt, S.: Diffusion probabilistic modeling for video generation. arXiv preprint arXiv:2203.09481 (2022)","DOI":"10.3390\/e25101469"},{"key":"10_CR51","unstructured":"Young., J.: Xatlas: mesh parameterization\/UV unwrapping library (2022). https:\/\/github.com\/jpcy\/xatlas"},{"key":"10_CR52","unstructured":"Yu, X., Guo, Y.C., Li, Y., Liang, D., Zhang, S.H., Qi, X.: Text-to-3d with classifier score distillation. In: ICLR (2024)"},{"key":"10_CR53","doi-asserted-by":"crossref","unstructured":"Zhang, Z., et al.: Trip: temporal residual learning with image noise prior for image-to-video diffusion models. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00828"},{"key":"10_CR54","unstructured":"Zhu, J., Zhuang, P., Koyejo, S.: HIFA: high-fidelity text-to-3d generation with advanced diffusion guidance. In: ICLR (2024)"},{"key":"10_CR55","doi-asserted-by":"crossref","unstructured":"Zhu, R., et al.: SD-DiT: unleashing the power of self-supervised discrimination in diffusion transformer. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00806"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73202-7_10","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,20]],"date-time":"2024-11-20T15:07:20Z","timestamp":1732115240000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73202-7_10"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,21]]},"ISBN":["9783031732010","9783031732027"],"references-count":55,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73202-7_10","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,21]]},"assertion":[{"value":"21 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}