{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:26:43Z","timestamp":1777656403015,"version":"3.51.4"},"publisher-location":"Cham","reference-count":46,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031729126","type":"print"},{"value":"9783031729133","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72913-3_9","type":"book-chapter","created":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T21:45:52Z","timestamp":1733089552000},"page":"146-163","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["GenRC: Generative 3D Room Completion from\u00a0Sparse Image Collections"],"prefix":"10.1007","author":[{"given":"Ming-Feng","family":"Li","sequence":"first","affiliation":[]},{"given":"Yueh-Feng","family":"Ku","sequence":"additional","affiliation":[]},{"given":"Hong-Xuan","family":"Yen","sequence":"additional","affiliation":[]},{"given":"Chi","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Yu-Lun","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Albert Y. 
C.","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Cheng-Hao","family":"Kuo","sequence":"additional","affiliation":[]},{"given":"Min","family":"Sun","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,2]]},"reference":[{"key":"9_CR1","doi-asserted-by":"crossref","unstructured":"Anciukevi\u010dius, T., et al.: RenderDiffusion: image diffusion for 3D reconstruction, inpainting and generation. In: CVPR, pp. 12608\u201312618 (2023)","DOI":"10.1109\/CVPR52729.2023.01213"},{"key":"9_CR2","unstructured":"Bae, G., Budvytis, I., Cipolla, R.: IronDepth: iterative refinement of single-view depth using surface normal and its uncertainty. In: BMVC (2022)"},{"key":"9_CR3","unstructured":"Bar-Tal, O., Yariv, L., Lipman, Y., Dekel, T.: MultiDiffusion: fusing diffusion paths for controlled image generation. In: ICML (2023)"},{"key":"9_CR4","unstructured":"Baruch, G., et\u00a0al.: ARKitScenes: a diverse real-world dataset for 3D indoor scene understanding using mobile RGB-D data. arXiv preprint arXiv:2111.08897 (2021)"},{"key":"9_CR5","doi-asserted-by":"crossref","unstructured":"Cai, S., et al.: DiffDreamer: towards consistent unsupervised single-view scene extrapolation with conditional diffusion models. In: ICCV, pp. 2139\u20132150 (2023)","DOI":"10.1109\/ICCV51070.2023.00204"},{"key":"9_CR6","doi-asserted-by":"crossref","unstructured":"Chang, A., et al.: Matterport3D: learning from RGB-D data in indoor environments. In: International Conference on 3D Vision (3DV) (2017)","DOI":"10.1109\/3DV.2017.00081"},{"key":"9_CR7","unstructured":"Chang, A.X., et\u00a0al.: ShapeNet: an information-rich 3D model repository. arXiv preprint arXiv:1512.03012 (2015)"},{"key":"9_CR8","doi-asserted-by":"crossref","unstructured":"Cheng, Y.C., Lee, H.Y., Tulyakov, S., Schwing, A.G., Gui, L.Y.: SDFusion: multimodal 3D shape completion, reconstruction, and generation. In: CVPR, pp. 
4456\u20134465 (2023)","DOI":"10.1109\/CVPR52729.2023.00433"},{"key":"9_CR9","doi-asserted-by":"crossref","unstructured":"Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nie\u00dfner, M.: ScanNet: richly-annotated 3D reconstructions of indoor scenes. In: CVPR, pp. 5828\u20135839 (2017)","DOI":"10.1109\/CVPR.2017.261"},{"key":"9_CR10","doi-asserted-by":"crossref","unstructured":"Erko\u00e7, Z., Ma, F., Shan, Q., Nie\u00dfner, M., Dai, A.: HyperDiffusion: generating implicit neural fields with weight-space diffusion. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01315"},{"key":"9_CR11","unstructured":"Fridman, R., Abecasis, A., Kasten, Y., Dekel, T.: SceneScape: text-driven consistent scene generation. In: NeurIPS (2023)"},{"key":"9_CR12","unstructured":"Gal, R., et al.: An image is worth one word: personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618 (2022)"},{"key":"9_CR13","unstructured":"Gao, J., et al.: GET3D: a generative model of high quality 3D textured shapes learned from images, vol. 35, pp. 31841\u201331854 (2022)"},{"key":"9_CR14","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Adv. Neural. Inf. Process. Syst. 33, 6840\u20136851 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"9_CR15","doi-asserted-by":"crossref","unstructured":"H\u00f6llein, L., Cao, A., Owens, A., Johnson, J., Nie\u00dfner, M.: Text2Room: extracting textured 3D meshes from 2D text-to-image models. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00727"},{"key":"9_CR16","doi-asserted-by":"crossref","unstructured":"Johnson, J., et al.: Accelerating 3d deep learning with PyTorch3D. In: SIGGRAPH Asia 2020 Courses, p.\u00a01 (2020)","DOI":"10.1145\/3415263.3419160"},{"key":"9_CR17","unstructured":"Kasten, Y., Rahamim, O., Chechik, G.: Point-cloud completion with pretrained text-to-image diffusion models. 
In: NeurIPS (2023)"},{"key":"9_CR18","doi-asserted-by":"crossref","unstructured":"Lei, J., Tang, J., Jia, K.: RGBD2: generative scene synthesis via incremental view inpainting using RGBD diffusion models. In: CVPR, pp. 8422\u20138434 (2023)","DOI":"10.1109\/CVPR52729.2023.00814"},{"key":"9_CR19","doi-asserted-by":"publisher","unstructured":"Li, Z., Wang, Q., Snavely, N., Kanazawa, A.: InfiniteNature-Zero: learning perpetual view generation of natural scenes from single images. In: ECCV, pp. 515\u2013534 (2022). https:\/\/doi.org\/10.1007\/978-3-031-19769-7_30","DOI":"10.1007\/978-3-031-19769-7_30"},{"key":"9_CR20","doi-asserted-by":"crossref","unstructured":"Lin, C.H., et al.: Magic3D: high-resolution text-to-3D content creation. In: CVPR, pp. 300\u2013309 (2023)","DOI":"10.1109\/CVPR52729.2023.00037"},{"key":"9_CR21","doi-asserted-by":"crossref","unstructured":"Liu, A., Tucker, R., Jampani, V., Makadia, A., Snavely, N., Kanazawa, A.: Infinite nature: perpetual view generation of natural scenes from a single image. In: ICCV, pp. 14458\u201314467 (2021)","DOI":"10.1109\/ICCV48922.2021.01419"},{"key":"9_CR22","unstructured":"Liu, M., et\u00a0al.: One-2-3-45: any single image to 3D mesh in 45 seconds without per-shape optimization. arXiv preprint arXiv:2306.16928 (2023)"},{"key":"9_CR23","doi-asserted-by":"crossref","unstructured":"Liu, R., Wu, R., Van\u00a0Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: zero-shot one image to 3D object. In: ICCV, pp. 9298\u20139309 (2023)","DOI":"10.1109\/ICCV51070.2023.00853"},{"key":"9_CR24","doi-asserted-by":"crossref","unstructured":"Metzer, G., Richardson, E., Patashnik, O., Giryes, R., Cohen-Or, D.: Latent-NeRF for shape-guided generation of 3D shapes and textures. In: CVPR, pp. 
12663\u201312673 (2023)","DOI":"10.1109\/CVPR52729.2023.01218"},{"key":"9_CR25","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1007\/978-3-030-58452-8_24","volume-title":"Computer Vision \u2013 ECCV 2020","author":"B Mildenhall","year":"2020","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 405\u2013421. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_24"},{"key":"9_CR26","doi-asserted-by":"crossref","unstructured":"M\u00fcller, N., Siddiqui, Y., Porzi, L., Bulo, S.R., Kontschieder, P., Nie\u00dfner, M.: DiffRF: rendering-guided 3D radiance field diffusion. In: CVPR, pp. 4328\u20134338 (2023)","DOI":"10.1109\/CVPR52729.2023.00421"},{"key":"9_CR27","unstructured":"Nichol, A., et al.: GLIDE: towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741 (2021)"},{"key":"9_CR28","doi-asserted-by":"crossref","unstructured":"Park, J.J., Florence, P., Straub, J., Newcombe, R., Lovegrove, S.: DeepSDF: learning continuous signed distance functions for shape representation. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00025"},{"key":"9_CR29","unstructured":"Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: DreamFusion: text-to-3D using 2D diffusion. arXiv preprint arXiv:2209.14988 (2022)"},{"key":"9_CR30","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: ICML, pp. 8748\u20138763. PMLR (2021)"},{"key":"9_CR31","unstructured":"Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical text-conditional image generation with CLIP latents. 
arXiv preprint arXiv:2204.06125, 1(2), 3 (2022)"},{"key":"9_CR32","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"9_CR33","doi-asserted-by":"crossref","unstructured":"Saharia, C., et al.: Palette: image-to-image diffusion models. In: ACM SIGGRAPH 2022 Conference Proceedings, pp. 1\u201310 (2022)","DOI":"10.1145\/3528233.3530757"},{"key":"9_CR34","unstructured":"Saharia, C., et al.: Photorealistic text-to-image diffusion models with deep language understanding, vol. 35, pp. 36479\u201336494 (2022)"},{"key":"9_CR35","unstructured":"Schuhmann, C., et\u00a0al.: LAION-5B: an open large-scale dataset for training next generation image-text models, vol. 35, pp. 25278\u201325294 (2022)"},{"key":"9_CR36","doi-asserted-by":"crossref","unstructured":"Shue, J.R., Chan, E.R., Po, R., Ankner, Z., Wu, J., Wetzstein, G.: 3D neural field generation using triplane diffusion. In: CVPR, pp. 20875\u201320886 (2023)","DOI":"10.1109\/CVPR52729.2023.02000"},{"key":"9_CR37","doi-asserted-by":"crossref","unstructured":"Song, L., et al.: RoomDreamer: text-driven 3D indoor scene synthesis with coherent geometry and texture. In: ACM MM (2023)","DOI":"10.1145\/3581783.3611800"},{"key":"9_CR38","unstructured":"Tang, J., Ren, J., Zhou, H., Liu, Z., Zeng, G.: DreamGaussian: generative gaussian splatting for efficient 3D content creation (2024)"},{"key":"9_CR39","unstructured":"Tang, S., Zhang, F., Chen, J., Wang, P., Furukawa, Y.: MVDiffusion: enabling holistic multi-view image generation with correspondence-aware diffusion. In: NeurIPS (2023)"},{"key":"9_CR40","doi-asserted-by":"crossref","unstructured":"Voynov, A., Aberman, K., Cohen-Or, D.: Sketch-guided text-to-image diffusion models. In: ACM SIGGRAPH 2023 Conference Proceedings, pp. 
1\u201311 (2023)","DOI":"10.1145\/3588432.3591560"},{"key":"9_CR41","doi-asserted-by":"crossref","unstructured":"Wang, H., Du, X., Li, J., Yeh, R.A., Shakhnarovich, G.: Score Jacobian chaining: lifting pretrained 2D diffusion models for 3D generation. In: CVPR, pp. 12619\u201312629 (2023)","DOI":"10.1109\/CVPR52729.2023.01214"},{"key":"9_CR42","unstructured":"Wu, T., Zheng, C., Cham, T.J.: PanoDiffusion: depth-aided 360-degree indoor RGB panorama outpainting via latent diffusion model. In: ICLR (2024)"},{"key":"9_CR43","doi-asserted-by":"crossref","unstructured":"Yang, B., et al.: Paint by example: exemplar-based image editing with diffusion models. In: CVPR, pp. 18381\u201318391 (2023)","DOI":"10.1109\/CVPR52729.2023.01763"},{"key":"9_CR44","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: ICCV, pp. 3836\u20133847 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"9_CR45","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: CVPR, pp. 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"9_CR46","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"519","DOI":"10.1007\/978-3-030-58545-7_30","volume-title":"Computer Vision \u2013 ECCV 2020","author":"J Zheng","year":"2020","unstructured":"Zheng, J., Zhang, J., Li, J., Tang, R., Gao, S., Zhou, Z.: Structured3D: a large photo-realistic dataset for structured 3D modeling. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12354, pp. 519\u2013535. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-58545-7_30"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72913-3_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T23:23:05Z","timestamp":1733095385000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72913-3_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,2]]},"ISBN":["9783031729126","9783031729133"],"references-count":46,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72913-3_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12,2]]},"assertion":[{"value":"2 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}