{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,2]],"date-time":"2025-11-02T18:38:51Z","timestamp":1762108731498,"version":"build-2065373602"},"publisher-location":"Cham","reference-count":86,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031726576"},{"type":"electronic","value":"9783031726583"}],"license":[{"start":{"date-parts":[[2024,10,2]],"date-time":"2024-10-02T00:00:00Z","timestamp":1727827200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,2]],"date-time":"2024-10-02T00:00:00Z","timestamp":1727827200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72658-3_5","type":"book-chapter","created":{"date-parts":[[2024,10,2]],"date-time":"2024-10-02T03:32:37Z","timestamp":1727839957000},"page":"71-89","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["CaesarNeRF: Calibrated Semantic Representation for\u00a0Few-Shot Generalizable Neural 
Rendering"],"prefix":"10.1007","author":[{"given":"Haidong","family":"Zhu","sequence":"first","affiliation":[]},{"given":"Tianyu","family":"Ding","sequence":"additional","affiliation":[]},{"given":"Tianyi","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Ilya","family":"Zharkov","sequence":"additional","affiliation":[]},{"given":"Ram","family":"Nevatia","sequence":"additional","affiliation":[]},{"given":"Luming","family":"Liang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,2]]},"reference":[{"key":"5_CR1","doi-asserted-by":"crossref","unstructured":"Bao, C., et al.: Sine: Semantic-driven image-based nerf editing with prior-guided editing field. In: CVPR, pp. 20919\u201320929 (2023)","DOI":"10.1109\/CVPR52729.2023.02004"},{"key":"5_CR2","unstructured":"Bao, Y., Ding, T., Huo, J., Li, W., Li, Y., Gao, Y.: Insertnerf: instilling generalizability into nerf with hypernet modules. In: ICLR (2024)"},{"key":"5_CR3","doi-asserted-by":"crossref","unstructured":"Bao, Y., Li, Y., Huo, J., Ding, T., Liang, X., Li, W., Gao, Y.: Where and how: Mitigating confusion in neural radiance fields from sparse inputs. arXiv preprint arXiv:2308.02908 (2023)","DOI":"10.1145\/3581783.3613769"},{"key":"5_CR4","doi-asserted-by":"crossref","unstructured":"Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Mip-nerf 360: unbounded anti-aliased neural radiance fields. In: CVPR, pp. 5470\u20135479 (2022)","DOI":"10.1109\/CVPR52688.2022.00539"},{"key":"5_CR5","doi-asserted-by":"crossref","unstructured":"Buehler, C., Bosse, M., McMillan, L., Gortler, S., Cohen, M.: Unstructured lumigraph rendering. In: SIGGRAPH, pp. 425\u2013432 (2001)","DOI":"10.1145\/383259.383309"},{"key":"5_CR6","doi-asserted-by":"crossref","unstructured":"Chen, A., et al.: Mvsnerf: fast generalizable radiance field reconstruction from multi-view stereo. In: ICCV, pp. 
14124\u201314133 (2021)","DOI":"10.1109\/ICCV48922.2021.01386"},{"key":"5_CR7","doi-asserted-by":"crossref","unstructured":"Chen, W., et al.: Beyond appearance: a semantic controllable self-supervised learning framework for human-centric visual tasks. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01445"},{"key":"5_CR8","unstructured":"Chen, Y., Xu, H., Wu, Q., Zheng, C., Cham, T.J., Cai, J.: Explicit correspondence matching for generalizable neural radiance fields. arXiv preprint arXiv:2304.12294 (2023)"},{"key":"5_CR9","doi-asserted-by":"crossref","unstructured":"Chibane, J., Bansal, A., Lazova, V., Pons-Moll, G.: Stereo radiance fields (srf): learning view synthesis for sparse views of novel scenes. In: CVPR, pp. 7911\u20137920 (2021)","DOI":"10.1109\/CVPR46437.2021.00782"},{"key":"5_CR10","doi-asserted-by":"crossref","unstructured":"Cong, W., et al.: Enhancing nerf akin to enhancing llms: generalizable nerf transformer with mixture-of-view-experts. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00296"},{"key":"5_CR11","doi-asserted-by":"crossref","unstructured":"Debevec, P.E., Taylor, C.J., Malik, J.: Modeling and rendering architecture from photographs: a hybrid geometry-and image-based approach. In: SIGGRAPH, pp. 11\u201320 (1996)","DOI":"10.1145\/237170.237191"},{"key":"5_CR12","doi-asserted-by":"crossref","unstructured":"Deng, C., et al.: Nerdi: single-view nerf synthesis with language-guided diffusion as general image priors. In: CVPR, pp. 20637\u201320647 (2023)","DOI":"10.1109\/CVPR52729.2023.01977"},{"key":"5_CR13","doi-asserted-by":"crossref","unstructured":"Deng, K., Liu, A., Zhu, J.Y., Ramanan, D.: Depth-supervised nerf: fewer views and faster training for free. In: CVPR, pp. 12882\u201312891 (2022)","DOI":"10.1109\/CVPR52688.2022.01254"},{"key":"5_CR14","doi-asserted-by":"crossref","unstructured":"Downs, L., et al.: Google scanned objects: a high-quality dataset of 3d scanned household items. In: ICRA, pp. 
2553\u20132560 (2022)","DOI":"10.1109\/ICRA46639.2022.9811809"},{"key":"5_CR15","doi-asserted-by":"crossref","unstructured":"Fridovich-Keil, S., Meanti, G., Warburg, F.R., Recht, B., Kanazawa, A.: K-planes: explicit radiance fields in space, time, and appearance. In: CVPR, pp. 12479\u201312488 (2023)","DOI":"10.1109\/CVPR52729.2023.01201"},{"key":"5_CR16","unstructured":"Fu, Y., Misra, I., Wang, X.: Multiplane nerf-supervised disentanglement of depth and camera pose from videos. In: ICML (2022)"},{"key":"5_CR17","doi-asserted-by":"crossref","unstructured":"Gao, Y., Cao, Y.P., Shan, Y.: Surfelnerf: neural surfel radiance fields for online photorealistic reconstruction of indoor scenes. In: CVPR, pp. 108\u2013118 (2023)","DOI":"10.1109\/CVPR52729.2023.00019"},{"key":"5_CR18","unstructured":"Gu, J., Liu, L., Wang, P., Theobalt, C.: Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis. arXiv preprint arXiv:2110.08985 (2021)"},{"key":"5_CR19","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: NeurIPS, pp. 6840\u20136851 (2020)"},{"key":"5_CR20","doi-asserted-by":"crossref","unstructured":"Irshad, M.Z., et al.: Neo 360: neural fields for sparse view synthesis of outdoor scenes. In: ICCV, pp. 9187\u20139198 (2023)","DOI":"10.1109\/ICCV51070.2023.00843"},{"key":"5_CR21","doi-asserted-by":"crossref","unstructured":"Jain, A., Mildenhall, B., Barron, J.T., Abbeel, P., Poole, B.: Zero-shot text-guided object generation with dream fields. In: CVPR, pp. 867\u2013876 (2022)","DOI":"10.1109\/CVPR52688.2022.00094"},{"key":"5_CR22","doi-asserted-by":"crossref","unstructured":"Jain, A., Tancik, M., Abbeel, P.: Putting nerf on a diet: semantically consistent few-shot view synthesis. In: ICCV, pp. 5885\u20135894 (2021)","DOI":"10.1109\/ICCV48922.2021.00583"},{"key":"5_CR23","doi-asserted-by":"crossref","unstructured":"Jang, W., Agapito, L.: Codenerf: disentangled neural radiance fields for object categories. 
In: ICCV, pp. 12949\u201312958 (2021)","DOI":"10.1109\/ICCV48922.2021.01271"},{"key":"5_CR24","doi-asserted-by":"crossref","unstructured":"Jiang, Y., et al.: Alignerf: high-fidelity neural radiance fields via alignment-aware training. In: CVPR, pp. 46\u201355 (2023)","DOI":"10.1109\/CVPR52729.2023.00013"},{"key":"5_CR25","doi-asserted-by":"crossref","unstructured":"Johari, M.M., Lepoittevin, Y., Fleuret, F.: Geonerf: generalizing nerf with geometry priors. In: CVPR, pp. 18365\u201318375 (2022)","DOI":"10.1109\/CVPR52688.2022.01782"},{"key":"5_CR26","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"694","DOI":"10.1007\/978-3-319-46475-6_43","volume-title":"Computer Vision \u2013 ECCV 2016","author":"J Johnson","year":"2016","unstructured":"Johnson, J., Alahi, A., Fei-Fei, L.: Perceptual losses for real-time style transfer and super-resolution. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9906, pp. 694\u2013711. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46475-6_43"},{"key":"5_CR27","unstructured":"Kania, A., Kasymov, A., Zi\u0119ba, M., Spurek, P.: Hypernerfgan: Hypernetwork approach to 3d nerf gan. arXiv preprint arXiv:2301.11631 (2023)"},{"key":"5_CR28","unstructured":"Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"key":"5_CR29","unstructured":"Kwak, M., Song, J., Kim, S.: Geconerf: Few-shot neural radiance fields via geometric consistency. arXiv preprint arXiv:2301.10941 (2023)"},{"key":"5_CR30","doi-asserted-by":"crossref","unstructured":"Li, Z., Wang, Q., Cole, F., Tucker, R., Snavely, N.: Dynibar: neural dynamic image-based rendering. In: CVPR, pp. 4273\u20134284 (2023)","DOI":"10.1109\/CVPR52729.2023.00416"},{"key":"5_CR31","doi-asserted-by":"crossref","unstructured":"Lin, C.H., et al.: Magic3d: high-resolution text-to-3d content creation. In: CVPR, pp. 
300\u2013309 (2023)","DOI":"10.1109\/CVPR52729.2023.00037"},{"key":"5_CR32","doi-asserted-by":"crossref","unstructured":"Lin, H., Peng, S., Xu, Z., Yan, Y., Shuai, Q., Bao, H., Zhou, X.: Efficient neural radiance fields for interactive free-viewpoint video. In: SIGGRAPH Asia 2022 Conference Papers pp.\u00a01\u20139 (2022)","DOI":"10.1145\/3550469.3555376"},{"key":"5_CR33","unstructured":"Lin, Y., et al.: Componerf: Text-guided multi-object compositional nerf with editable 3d scene layout. arXiv preprint arXiv:2303.13843 (2023)"},{"issue":"6","key":"5_CR34","first-page":"1","volume":"40","author":"L Liu","year":"2021","unstructured":"Liu, L., Habermann, M., Rudnev, V., Sarkar, K., Gu, J., Theobalt, C.: Neural actor: Neural free-view synthesis of human actors with pose control. TOG 40(6), 1\u201316 (2021)","journal-title":"TOG"},{"key":"5_CR35","doi-asserted-by":"crossref","unstructured":"Liu, R., Wu, R., Van\u00a0Hoorick, B., Tokmakov, P., Zakharov, S., Vondrick, C.: Zero-1-to-3: Zero-shot one image to 3d object. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00853"},{"key":"5_CR36","unstructured":"Liu, X., Kao, S.h., Chen, J., Tai, Y.W., Tang, C.K.: Deceptive-nerf: Enhancing nerf reconstruction using pseudo-observations from diffusion models. arXiv preprint arXiv:2305.15171 (2023)"},{"key":"5_CR37","unstructured":"Liu, Y., et al.: Syncdreamer: Generating multiview-consistent images from a single-view image. arXiv preprint arXiv:2309.03453 (2023)"},{"key":"5_CR38","doi-asserted-by":"crossref","unstructured":"Liu, Y., et al.: Neural rays for occlusion-aware image-based rendering. In: CVPR, pp. 7824\u20137833 (2022)","DOI":"10.1109\/CVPR52688.2022.00767"},{"key":"5_CR39","doi-asserted-by":"crossref","unstructured":"Mariotti, O., Mac\u00a0Aodha, O., Bilen, H.: Viewnerf: Unsupervised viewpoint estimation using category-level neural radiance fields. 
arXiv preprint arXiv:2212.00436 (2022)","DOI":"10.1109\/ICCV48922.2021.01025"},{"key":"5_CR40","doi-asserted-by":"crossref","unstructured":"Martin-Brualla, R., et al.: Nerf in the wild: Neural radiance fields for unconstrained photo collections. In: CVPR, pp. 7210\u20137219 (2021)","DOI":"10.1109\/CVPR46437.2021.00713"},{"key":"5_CR41","doi-asserted-by":"crossref","unstructured":"Metzer, G., Richardson, E., Patashnik, O., Giryes, R., Cohen-Or, D.: Latent-nerf for shape-guided generation of 3d shapes and textures. In: CVPR, pp. 12663\u201312673 (2023)","DOI":"10.1109\/CVPR52729.2023.01218"},{"issue":"4","key":"5_CR42","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3306346.3322980","volume":"38","author":"B Mildenhall","year":"2019","unstructured":"Mildenhall, B., et al.: Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. TOG 38(4), 1\u201314 (2019)","journal-title":"TOG"},{"key":"5_CR43","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1007\/978-3-030-58452-8_24","volume-title":"Computer Vision \u2013 ECCV 2020","author":"B Mildenhall","year":"2020","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 405\u2013421. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_24"},{"issue":"1","key":"5_CR44","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1145\/3503250","volume":"65","author":"B Mildenhall","year":"2021","unstructured":"Mildenhall, B., et al.: Nerf: Representing scenes as neural radiance fields for view synthesis. Commun. ACM 65(1), 99\u2013106 (2021)","journal-title":"Commun. 
ACM"},{"key":"5_CR45","doi-asserted-by":"crossref","unstructured":"Niemeyer, M., Barron, J.T., Mildenhall, B., Sajjadi, M.S., Geiger, A., Radwan, N.: Regnerf: regularizing neural radiance fields for view synthesis from sparse inputs. In: CVPR, pp. 5480\u20135490 (2022)","DOI":"10.1109\/CVPR52688.2022.00540"},{"key":"5_CR46","doi-asserted-by":"crossref","unstructured":"Noguchi, A., Sun, X., Lin, S., Harada, T.: Neural articulated radiance field. In: ICCV, pp. 5762\u20135772 (2021)","DOI":"10.1109\/ICCV48922.2021.00571"},{"key":"5_CR47","doi-asserted-by":"crossref","unstructured":"Park, K., et al.: Nerfies: deformable neural radiance fields. In: ICCV, pp. 5865\u20135874 (2021)","DOI":"10.1109\/ICCV48922.2021.00581"},{"key":"5_CR48","doi-asserted-by":"crossref","unstructured":"Peng, S., et al.: Animatable neural radiance fields for modeling dynamic human bodies. In: ICCV, pp. 14314\u201314323 (2021)","DOI":"10.1109\/ICCV48922.2021.01405"},{"key":"5_CR49","unstructured":"Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022)"},{"key":"5_CR50","doi-asserted-by":"crossref","unstructured":"Pumarola, A., Corona, E., Pons-Moll, G., Moreno-Noguer, F.: D-nerf: neural radiance fields for dynamic scenes. In: CVPR, pp. 10318\u201310327 (2021)","DOI":"10.1109\/CVPR46437.2021.01018"},{"key":"5_CR51","unstructured":"Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: deep learning on point sets for 3d classification and segmentation. In: CVPR, pp. 652\u2013660 (2017)"},{"key":"5_CR52","doi-asserted-by":"crossref","unstructured":"Roessle, B., Barron, J.T., Mildenhall, B., Srinivasan, P.P., Nie\u00dfner, M.: Dense depth priors for neural radiance fields from sparse input views. In: CVPR, pp. 
12892\u201312901 (2022)","DOI":"10.1109\/CVPR52688.2022.01255"},{"key":"5_CR53","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"5_CR54","doi-asserted-by":"crossref","unstructured":"Sch\u00f6nberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.445"},{"key":"5_CR55","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"501","DOI":"10.1007\/978-3-319-46487-9_31","volume-title":"Computer Vision \u2013 ECCV 2016","author":"JL Sch\u00f6nberger","year":"2016","unstructured":"Sch\u00f6nberger, J.L., Zheng, E., Frahm, J.-M., Pollefeys, M.: Pixelwise view selection for unstructured multi-view stereo. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9907, pp. 501\u2013518. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46487-9_31"},{"key":"5_CR56","first-page":"20154","volume":"33","author":"K Schwarz","year":"2020","unstructured":"Schwarz, K., Liao, Y., Niemeyer, M., Geiger, A.: Graf: generative radiance fields for 3d-aware image synthesis. NeurIPS 33, 20154\u201320166 (2020)","journal-title":"NeurIPS"},{"key":"5_CR57","doi-asserted-by":"crossref","unstructured":"Shue, J.R., Chan, E.R., Po, R., Ankner, Z., Wu, J., Wetzstein, G.: 3d neural field generation using triplane diffusion. In: CVPR, pp. 20875\u201320886 (2023)","DOI":"10.1109\/CVPR52729.2023.02000"},{"key":"5_CR58","doi-asserted-by":"publisher","unstructured":"Suhail, M., Esteves, C., Sigal, L., Makadia, A.: Generalizable patch-based neural rendering. In: ECCV, pp. 156\u2013174 (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-19824-3_10","DOI":"10.1007\/978-3-031-19824-3_10"},{"key":"5_CR59","doi-asserted-by":"crossref","unstructured":"Trevithick, A., Yang, B.: Grf: learning a general radiance field for 3d representation and rendering. In: ICCV, pp. 15182\u201315192 (2021)","DOI":"10.1109\/ICCV48922.2021.01490"},{"key":"5_CR60","unstructured":"Varma, M., Wang, P., Chen, X., Chen, T., Venugopalan, S., Wang, Z.: Is attention all that nerf needs? In: ICLR (2023)"},{"key":"5_CR61","unstructured":"Vaswani, A., et al.: Attention is all you need. NeurIPS 30 (2017)"},{"key":"5_CR62","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"836","DOI":"10.1007\/978-3-319-10602-1_54","volume-title":"Computer Vision \u2013 ECCV 2014","author":"M Waechter","year":"2014","unstructured":"Waechter, M., Moehrle, N., Goesele, M.: Let there be color! large-scale texturing of 3D reconstructions. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 836\u2013850. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_54"},{"key":"5_CR63","doi-asserted-by":"crossref","unstructured":"Wang, G., Chen, Z., Loy, C.C., Liu, Z.: Sparsenerf: distilling depth ranking for few-shot novel view synthesis. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00832"},{"key":"5_CR64","doi-asserted-by":"crossref","unstructured":"Wang, Q., et al.: Ibrnet: learning multi-view image-based rendering. In: CVPR, pp. 4690\u20134699 (2021)","DOI":"10.1109\/CVPR46437.2021.00466"},{"key":"5_CR65","doi-asserted-by":"crossref","unstructured":"Wang, T., et\u00a0al.: Rodin: a generative model for sculpting 3d digital avatars using diffusion. In: CVPR, pp. 4563\u20134573 (2023)","DOI":"10.1109\/CVPR52729.2023.00443"},{"key":"5_CR66","doi-asserted-by":"crossref","unstructured":"Wei, Y., Liu, S., Rao, Y., Zhao, W., Lu, J., Zhou, J.: Nerfingmvs: guided optimization of neural radiance fields for indoor multi-view stereo. 
In: CVPR, pp. 5610\u20135619 (2021)","DOI":"10.1109\/ICCV48922.2021.00556"},{"key":"5_CR67","doi-asserted-by":"crossref","unstructured":"Wizadwongsa, S., Phongthawee, P., Yenphraphai, J., Suwajanakorn, S.: Nex: real-time view synthesis with neural basis expansion. In: CVPR, pp. 8534\u20138543 (2021)","DOI":"10.1109\/CVPR46437.2021.00843"},{"key":"5_CR68","doi-asserted-by":"publisher","unstructured":"Xiangli, Y., et al.: Bungeenerf: progressive neural radiance field for extreme multi-scale scene rendering. In: ECCV (2022). https:\/\/doi.org\/10.1007\/978-3-031-19824-3_7","DOI":"10.1007\/978-3-031-19824-3_7"},{"key":"5_CR69","doi-asserted-by":"crossref","unstructured":"Xie, C., Park, K., Martin-Brualla, R., Brown, M.: Fig-nerf: figure-ground neural radiance fields for 3d object category modelling. In: 3DV, pp. 962\u2013971 (2021)","DOI":"10.1109\/3DV53792.2021.00104"},{"key":"5_CR70","doi-asserted-by":"publisher","unstructured":"Xu, D., Jiang, Y., Wang, P., Fan, Z., Shi, H., Wang, Z.: Sinnerf: training neural radiance fields on complex scenes from a single image. In: ECCV, pp. 736\u2013753 (2022). https:\/\/doi.org\/10.1007\/978-3-031-20047-2_42","DOI":"10.1007\/978-3-031-20047-2_42"},{"key":"5_CR71","doi-asserted-by":"crossref","unstructured":"Xu, Q., Xu, Z., Philip, J., Bi, S., Shu, Z., Sunkavalli, K., Neumann, U.: Point-nerf: point-based neural radiance fields. In: CVPR, pp. 5438\u20135448 (2022)","DOI":"10.1109\/CVPR52688.2022.00536"},{"key":"5_CR72","doi-asserted-by":"crossref","unstructured":"Yang, B., et al.: Learning object-compositional neural radiance field for editable scene rendering. In: ICCV, pp. 13779\u201313788 (2021)","DOI":"10.1109\/ICCV48922.2021.01352"},{"key":"5_CR73","doi-asserted-by":"crossref","unstructured":"Yang, H., Hong, L., Li, A., Hu, T., Li, Z., Lee, G.H., Wang, L.: Contranerf: Generalizable neural radiance fields for synthetic-to-real novel view synthesis via contrastive learning. In: CVPR, pp. 
16508\u201316517 (2023)","DOI":"10.1109\/CVPR52729.2023.01584"},{"key":"5_CR74","doi-asserted-by":"crossref","unstructured":"Yang, J., Pavone, M., Wang, Y.: Freenerf: Improving few-shot neural rendering with free frequency regularization. In: CVPR, pp. 8254\u20138263 (2023)","DOI":"10.1109\/CVPR52729.2023.00798"},{"key":"5_CR75","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"785","DOI":"10.1007\/978-3-030-01237-3_47","volume-title":"Computer Vision \u2013 ECCV 2018","author":"Y Yao","year":"2018","unstructured":"Yao, Y., Luo, Z., Li, S., Fang, T., Quan, L.: MVSNet: depth inference for unstructured multi-view stereo. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11212, pp. 785\u2013801. Springer, Cham (2018). https:\/\/doi.org\/10.1007\/978-3-030-01237-3_47"},{"key":"5_CR76","doi-asserted-by":"crossref","unstructured":"Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelnerf: neural radiance fields from one or few images. In: CVPR, pp. 4578\u20134587 (2021)","DOI":"10.1109\/CVPR46437.2021.00455"},{"key":"5_CR77","doi-asserted-by":"crossref","unstructured":"Yu, X., et\u00a0al.: Mvimgnet: a large-scale dataset of multi-view images. In: CVPR, pp. 9150\u20139161 (2023)","DOI":"10.1109\/CVPR52729.2023.00883"},{"key":"5_CR78","first-page":"29835","volume":"34","author":"J Zhang","year":"2021","unstructured":"Zhang, J., Yang, G., Tulsiani, S., Ramanan, D.: Ners: neural reflectance surfaces for sparse-view 3d reconstruction in the wild. NeurIPS 34, 29835\u201329847 (2021)","journal-title":"NeurIPS"},{"key":"5_CR79","doi-asserted-by":"crossref","unstructured":"Zhang, J., Li, X., Wan, Z., Wang, C., Liao, J.: Text2nerf: Text-driven 3d scene generation with neural radiance fields. 
arXiv preprint arXiv:2305.11588 (2023)","DOI":"10.1109\/TVCG.2024.3361502"},{"key":"5_CR80","unstructured":"Zhenxing, M., Xu, D.: Switch-nerf: learning scene decomposition with mixture of experts for large-scale neural radiance fields. In: ICLR (2023)"},{"key":"5_CR81","doi-asserted-by":"crossref","unstructured":"Zhou, T., Tucker, R., Flynn, J., Fyffe, G., Snavely, N.: Stereo magnification: Learning view synthesis using multiplane images. arXiv preprint arXiv:1805.09817 (2018)","DOI":"10.1145\/3197517.3201323"},{"key":"5_CR82","doi-asserted-by":"crossref","unstructured":"Zhou, Z., Tulsiani, S.: Sparsefusion: distilling view-conditioned diffusion for 3d reconstruction. In: CVPR, pp. 12588\u201312597 (2023)","DOI":"10.1109\/CVPR52729.2023.01211"},{"key":"5_CR83","doi-asserted-by":"crossref","unstructured":"Zhu, H., et al.: Multimodal neural radiance field. In: ICRA, pp. 9393\u20139399 (2023)","DOI":"10.1109\/ICRA48891.2023.10160388"},{"key":"5_CR84","doi-asserted-by":"crossref","unstructured":"Zhu, H., Zheng, Z., Zheng, W., Nevatia, R.: Cat-nerf: constancy-aware tx2former for dynamic body modeling. In: CVPRW, pp. 6618\u20136627 (2023)","DOI":"10.1109\/CVPRW59228.2023.00703"},{"key":"5_CR85","doi-asserted-by":"crossref","unstructured":"Zhuang, Y., Zhu, H., Sun, X., Cao, X.: Mofanerf: morphable facial neural radiance field. In: ECCV, pp. 268\u2013285 (2022)","DOI":"10.1007\/978-3-031-20062-5_16"},{"key":"5_CR86","unstructured":"Zimny, D., Trzci\u0144ski, T., Spurek, P.: Points2nerf: Generating neural radiance fields from 3d point cloud. 
arXiv preprint arXiv:2206.01290 (2022)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72658-3_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T23:53:10Z","timestamp":1732837990000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72658-3_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,2]]},"ISBN":["9783031726576","9783031726583"],"references-count":86,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72658-3_5","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,10,2]]},"assertion":[{"value":"2 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}