{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,24]],"date-time":"2025-05-24T04:03:39Z","timestamp":1748059419658,"version":"3.41.0"},"publisher-location":"Cham","reference-count":54,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031918551","type":"print"},{"value":"9783031918568","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-91856-8_12","type":"book-chapter","created":{"date-parts":[[2025,5,23]],"date-time":"2025-05-23T11:12:15Z","timestamp":1747998735000},"page":"195-211","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Advancing Few-Shot Novel View Synthesis with\u00a0Teacher-Student Guided Scene Geometry Refinement"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-8076-0805","authenticated-orcid":false,"given":"Yan","family":"Xing","sequence":"first","affiliation":[]},{"given":"Pan","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yali","family":"Guo","sequence":"additional","affiliation":[]},{"given":"Yongxin","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Shuangguan","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Youcheng","family":"Cai","sequence":"additional","affiliation":[]},{"given":"Ligang","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,12]]},"reference":[{"key":"12_CR1","unstructured":"Zhang, K., Riegler, G., Snavely, N., Koltun, V.: Nerf++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492 (2020)"},{"key":"12_CR2","doi-asserted-by":"crossref","unstructured":"Yu, A., Ye, V., Tancik, M., Kanazawa, A.: pixelnerf: Neural radiance fields from one or few images. In: CVPR, pp. 4578\u20134587 (2021)","DOI":"10.1109\/CVPR46437.2021.00455"},{"key":"12_CR3","doi-asserted-by":"crossref","unstructured":"Trevithick, A., Yang, B.: GRF: Learning a general radiance field for 3D representation and rendering. In: CVPR, pp. 15182\u201315192 (2021)","DOI":"10.1109\/ICCV48922.2021.01490"},{"key":"12_CR4","doi-asserted-by":"crossref","unstructured":"Chen, A., et al.: Mvsnerf: fast generalizable radiance field reconstruction from multi-view stereo. In: CVPR, pp. 14124\u201314133 (2021)","DOI":"10.1109\/ICCV48922.2021.01386"},{"key":"12_CR5","doi-asserted-by":"crossref","unstructured":"Chibane, J., Bansal, A., Lazova, V., Pons-Moll, G.: Stereo radiance fields (srf): Learning view synthesis for sparse views of novel scenes. In: CVPR, pp. 7911\u20137920 (2021)","DOI":"10.1109\/CVPR46437.2021.00782"},{"key":"12_CR6","doi-asserted-by":"crossref","unstructured":"Wang, Q., et al.: Ibrnet: Learning multi-view image-based rendering. In: CVPR, pp. 
4690\u20134699 (2021)","DOI":"10.1109\/CVPR46437.2021.00466"},{"key":"12_CR7","doi-asserted-by":"crossref","unstructured":"Johari, M.M., Lepoittevin, Y., Fleuret, F.: Geonerf: Generalizing nerf with geometry priors. In: CVPR, pp. 18365\u201318375 (2022)","DOI":"10.1109\/CVPR52688.2022.01782"},{"key":"12_CR8","doi-asserted-by":"crossref","unstructured":"Szymanowicz, S., Rupprecht, C., Vedaldi, A.: Splatter image: Ultra-fast single-view 3d reconstruction. arXiv preprint arXiv:2312.13150 (2023)","DOI":"10.1109\/CVPR52733.2024.00972"},{"key":"12_CR9","doi-asserted-by":"crossref","unstructured":"Zhu, H., He, T., Li, X., Li, B., Chen, Z.: Is vanilla mlp in neural radiance field enough for few-shot view synthesis? arXiv preprint arXiv:2403.06092 (2024)","DOI":"10.1109\/CVPR52733.2024.01918"},{"key":"12_CR10","doi-asserted-by":"crossref","unstructured":"Li, J., et al.: Dngaussian: Optimizing sparse-view 3D Gaussian radiance fields with global-local depth normalization. arXiv preprint arXiv:2403.06912 (2024)","DOI":"10.1109\/CVPR52733.2024.01963"},{"key":"12_CR11","doi-asserted-by":"crossref","unstructured":"Zhu, Z., Fan, Z., Jiang, Y., Wang, Z.: Fsgs: Real-time few-shot view synthesis using gaussian splatting. arXiv preprint arXiv:2312.00451 (2023)","DOI":"10.1007\/978-3-031-72933-1_9"},{"key":"12_CR12","doi-asserted-by":"crossref","unstructured":"Yang, J., Pavone, M., Wang, Y.: Freenerf: Improving few-shot neural rendering with free frequency regularization. In: CVPR, pp. 8254\u20138263 (2023)","DOI":"10.1109\/CVPR52729.2023.00798"},{"key":"12_CR13","doi-asserted-by":"crossref","unstructured":"Deng, K., Liu, A., Zhu, J.Y., Ramanan, D.: Depth-supervised nerf: Fewer views and faster training for free. In: CVPR, pp. 12882\u201312891 (2022)","DOI":"10.1109\/CVPR52688.2022.01254"},{"key":"12_CR14","doi-asserted-by":"crossref","unstructured":"Niemeyer, M., Barron, J.T., Mildenhall, B., Sajjadi, M.S., Geiger, A., Radwan, N.: Regnerf: Regularizing neural radiance fields for view synthesis from sparse inputs. In: CVPR, pp. 5480\u20135490 (2022)","DOI":"10.1109\/CVPR52688.2022.00540"},{"key":"12_CR15","doi-asserted-by":"crossref","unstructured":"Somraj, N., Karanayil, A., Soundararajan, R.: Simplenerf: Regularizing sparse input neural radiance fields with simpler solutions. In: SIGGRAPH Asia 2023 Conference Papers, pp. 1\u201311 (2023)","DOI":"10.1145\/3610548.3618188"},{"key":"12_CR16","doi-asserted-by":"crossref","unstructured":"Seo, S., Han, D., Chang, Y., Kwak, N.: Mixnerf: Modeling a ray with mixture density for novel view synthesis from sparse inputs. In: CVPR, pp. 20659\u201320668 (2023)","DOI":"10.1109\/CVPR52729.2023.01979"},{"key":"12_CR17","doi-asserted-by":"crossref","unstructured":"Seo, S., Chang, Y., Kwak, N.: Flipnerf: Flipped reflection rays for few-shot novel view synthesis. In: ICCV, pp. 22883\u201322893 (2023)","DOI":"10.1109\/ICCV51070.2023.02092"},{"key":"12_CR18","doi-asserted-by":"crossref","unstructured":"Kim, M., Seo, S., Han, B.: Infonerf: Ray entropy minimization for few-shot neural volume rendering. In: CVPR, pp. 12912\u201312921 (2022)","DOI":"10.1109\/CVPR52688.2022.01257"},{"issue":"1","key":"12_CR19","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1145\/3503250","volume":"65","author":"B Mildenhall","year":"2021","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: Nerf: representing scenes as neural radiance fields for view synthesis. Commun. ACM 65(1), 99\u2013106 (2021)","journal-title":"Commun. 
ACM"},{"key":"12_CR20","doi-asserted-by":"crossref","unstructured":"Barron, J.T., Mildenhall, B., Tancik, M., Hedman, P., Martin-Brualla, R., Srinivasan, P.P.: Mip-nerf: a multiscale representation for anti-aliasing neural radiance fields. In: CVPR, pp. 5855\u20135864 (2021)","DOI":"10.1109\/ICCV48922.2021.00580"},{"key":"12_CR21","first-page":"4805","volume":"34","author":"L Yariv","year":"2021","unstructured":"Yariv, L., Gu, J., Kasten, Y., Lipman, Y.: Volume rendering of neural implicit surfaces. Adv. Neural. Inf. Process. Syst. 34, 4805\u20134815 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"12_CR22","unstructured":"Wang, P., Liu, L., Liu, Y., Theobalt, C., Komura, T., Wang, W.: Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689 (2021)"},{"key":"12_CR23","first-page":"1966","volume":"35","author":"Y Wang","year":"2022","unstructured":"Wang, Y., Skorokhodov, I., Wonka, P.: Hf-neus: Improved surface reconstruction using high-frequency details. Adv. Neural. Inf. Process. Syst. 35, 1966\u20131978 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"12_CR24","doi-asserted-by":"crossref","unstructured":"Li, Z., et al.: Neuralangelo: High-fidelity neural surface reconstruction. In: CVPR, pp. 8456\u20138465 (2023)","DOI":"10.1109\/CVPR52729.2023.00817"},{"key":"12_CR25","doi-asserted-by":"crossref","unstructured":"Rosinol, A., Leonard, J.J., Carlone, L.: Nerf-slam: Real-time dense monocular slam with neural radiance fields. In: 2023 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 3437\u20133444. IEEE (2023)","DOI":"10.1109\/IROS55552.2023.10341922"},{"key":"12_CR26","doi-asserted-by":"crossref","unstructured":"Avraham, G., et al.: Nerfels: renderable neural codes for improved camera pose estimation. In: CVPR, pp. 5061\u20135070 (2022)","DOI":"10.1109\/CVPRW56347.2022.00554"},{"key":"12_CR27","unstructured":"Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: Dreamfusion: Text-to-3d using 2d diffusion. arXiv preprint arXiv:2209.14988 (2022)"},{"key":"12_CR28","doi-asserted-by":"crossref","unstructured":"H\u00f6llein, L., Cao, A., Owens, A., Johnson, J., Nie\u00dfner, M.: Text2room: Extracting textured 3d meshes from 2d text-to-image models. arXiv preprint arXiv:2303.11989 (2023)","DOI":"10.1109\/ICCV51070.2023.00727"},{"key":"12_CR29","unstructured":"Liu, M., Xu, C., Jin, H., Chen, L., Xu, Z., Su, H., et\u00a0al.: One-2-3-45: Any single image to 3d mesh in 45 seconds without per-shape optimization. arXiv preprint arXiv:2306.16928 (2023)"},{"key":"12_CR30","doi-asserted-by":"crossref","unstructured":"Yu, A., Li, R., Tancik, M., Li, H., Ng, R., Kanazawa, A.: Plenoctrees for real-time rendering of neural radiance fields. In: CVPR, pp. 5752\u20135761 (2021)","DOI":"10.1109\/ICCV48922.2021.00570"},{"key":"12_CR31","doi-asserted-by":"crossref","unstructured":"Chen, A., Xu, Z., Geiger, A., Yu, J., Su, H.: Tensorf: Tensorial radiance fields. In: ECCV, pp. 333\u2013350. Springer (2022)","DOI":"10.1007\/978-3-031-19824-3_20"},{"key":"12_CR32","doi-asserted-by":"crossref","unstructured":"Reiser, C., Peng, S., Liao, Y., Geiger, A.: Kilonerf: Speeding up neural radiance fields with thousands of tiny mlps. In: CVPR, pp. 
14335\u201314345 (2021)","DOI":"10.1109\/ICCV48922.2021.01407"},{"key":"12_CR33","doi-asserted-by":"crossref","unstructured":"Barron, J.T., Mildenhall, B., Verbin, D., Srinivasan, P.P., Hedman, P.: Zip-nerf: Anti-aliased grid-based neural radiance fields. arXiv preprint arXiv:2304.06706 (2023)","DOI":"10.1109\/ICCV51070.2023.01804"},{"key":"12_CR34","doi-asserted-by":"crossref","unstructured":"Fridovich-Keil, S., Yu, A., Tancik, M., Chen, Q., Recht, B., Kanazawa, A.: Plenoxels: radiance fields without neural networks. In: CVPR, pp. 5501\u20135510 (2022)","DOI":"10.1109\/CVPR52688.2022.00542"},{"issue":"4","key":"12_CR35","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3528223.3530127","volume":"41","author":"T M\u00fcller","year":"2022","unstructured":"M\u00fcller, T., Evans, A., Schied, C., Keller, A.: Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph. (ToG) 41(4), 1\u201315 (2022)","journal-title":"ACM Trans. Graph. (ToG)"},{"key":"12_CR36","doi-asserted-by":"crossref","unstructured":"Hu, W., et al.: Tri-miprf: Tri-mip representation for efficient anti-aliasing neural radiance fields. In: ICCV, pp. 19774\u201319783 (2023)","DOI":"10.1109\/ICCV51070.2023.01811"},{"key":"12_CR37","first-page":"15651","volume":"33","author":"L Liu","year":"2020","unstructured":"Liu, L., Gu, J., Zaw Lin, K., Chua, T.S., Theobalt, C.: Neural sparse voxel fields. Adv. Neural. Inf. Process. Syst. 33, 15651\u201315663 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"12_CR38","doi-asserted-by":"crossref","unstructured":"Chen, Z., et al.: Neurbf: A neural fields representation with adaptive radial basis functions. In: ICCV, pp. 4182\u20134194 (2023)","DOI":"10.1109\/ICCV51070.2023.00386"},{"key":"12_CR39","doi-asserted-by":"crossref","unstructured":"Somraj, N., Soundararajan, R.: Vip-nerf: Visibility prior for sparse input neural radiance fields. In: ACM SIGGRAPH 2023 Conference Proceedings, pp. 1\u201311 (2023)","DOI":"10.1145\/3610548.3618188"},{"key":"12_CR40","doi-asserted-by":"crossref","unstructured":"Roessle, B., Barron, J.T., Mildenhall, B., Srinivasan, P.P., Nie\u00dfner, M.: Dense depth priors for neural radiance fields from sparse input views. In: CVPR, pp. 12892\u201312901 (2022)","DOI":"10.1109\/CVPR52688.2022.01255"},{"key":"12_CR41","doi-asserted-by":"crossref","unstructured":"Wu, R., et\u00a0al.: Reconfusion: 3d reconstruction with diffusion priors. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 21551\u201321561 (2024)","DOI":"10.1109\/CVPR52733.2024.02036"},{"key":"12_CR42","doi-asserted-by":"crossref","unstructured":"Shi, R., Wei, X., Wang, C., Su, H.: Zerorf: Fast sparse view $$360^\\circ $$ reconstruction with zero pretraining. arXiv preprint arXiv:2312.09249 (2023)","DOI":"10.1109\/CVPR52733.2024.01995"},{"key":"12_CR43","doi-asserted-by":"crossref","unstructured":"Sun, J., et al.: Vgos: Voxel grid optimization for view synthesis from sparse inputs. arXiv preprint arXiv:2304.13386 (2023)","DOI":"10.24963\/ijcai.2023\/157"},{"key":"12_CR44","unstructured":"Kwak, M., Song, J., Kim, S.: Geconerf: Few-shot neural radiance fields via geometric consistency. arXiv preprint arXiv:2301.10941 (2023)"},{"key":"12_CR45","doi-asserted-by":"crossref","unstructured":"Wang, G., Chen, Z., Loy, C.C., Liu, Z.: Sparsenerf: Distilling depth ranking for few-shot novel view synthesis. 
arXiv preprint arXiv:2303.16196 (2023)","DOI":"10.1109\/ICCV51070.2023.00832"},{"key":"12_CR46","doi-asserted-by":"crossref","unstructured":"Jain, A., Tancik, M., Abbeel, P.: Putting nerf on a diet: semantically consistent few-shot view synthesis. In: ICCV, pp. 5885\u20135894 (2021)","DOI":"10.1109\/ICCV48922.2021.00583"},{"key":"12_CR47","unstructured":"Xiong, H., Muttukuru, S., Upadhyay, R., Chari, P., Kadambi, A.: Sparsegs: Real-time $$360^\\circ $$ sparse view synthesis using gaussian splatting. arXiv preprint arXiv:2312.00206 (2023)"},{"key":"12_CR48","doi-asserted-by":"crossref","unstructured":"Paliwal, A., et al.: Coherentgs: Sparse novel view synthesis with coherent 3d gaussians. arXiv preprint arXiv:2403.19495 (2024)","DOI":"10.1007\/978-3-031-73404-5_2"},{"issue":"4","key":"12_CR49","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3592433","volume":"42","author":"B Kerbl","year":"2023","unstructured":"Kerbl, B., Kopanas, G., Leimk\u00fchler, T., Drettakis, G.: 3D Gaussian splatting for real-time radiance field rendering. ACM Trans. Graph. 42(4), 1\u201314 (2023)","journal-title":"ACM Trans. Graph."},{"key":"12_CR50","doi-asserted-by":"crossref","unstructured":"Jensen, R., Dahl, A., Vogiatzis, G., Tola, E., Aan\u00e6s, H.: Large scale multi-view stereopsis evaluation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 406\u2013413 (2014)","DOI":"10.1109\/CVPR.2014.59"},{"key":"12_CR51","unstructured":"Nazarczuk, M., Tanay, T., Catley-Chandar, S., Shaw, R., Timofte, R., P\u00e9rez-Pellitero, E.: AIM 2024 sparse neural rendering challenge: dataset and benchmark. In: Proceedings of the European Conference on Computer Vision (ECCV) Workshops (2024)"},{"key":"12_CR52","doi-asserted-by":"crossref","unstructured":"Truong, P., Rakotosaona, M.J., Manhardt, F., Tombari, F.: Sparf: Neural radiance fields from sparse and noisy poses. In: CVPR, pp. 4190\u20134200 (2023)","DOI":"10.1109\/CVPR52729.2023.00408"},{"key":"12_CR53","unstructured":"Nazarczuk, M., et al.: AIM 2024 sparse neural rendering challenge: methods and results. In: Proceedings of the European Conference on Computer Vision (ECCV) Workshops (2024)"},{"key":"12_CR54","unstructured":"Wu, T., et al.: Voxurf: Voxel-based efficient and accurate neural surface reconstruction. 
arXiv preprint arXiv:2208.12697 (2022)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024 Workshops"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-91856-8_12","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,23]],"date-time":"2025-05-23T11:12:39Z","timestamp":1747998759000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-91856-8_12"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031918551","9783031918568"],"references-count":54,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-91856-8_12","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"12 May 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
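
The record above is the JSON body returned by Crossref's public REST API for this chapter's DOI. As a minimal sketch of retrieving and reading such a record (assuming the public https://api.crossref.org/works/{DOI} endpoint, which returns this shape, and the third-party requests package; all field names below are taken from the record itself):

import requests

DOI = "10.1007/978-3-031-91856-8_12"

# Fetch the same work record shown above from the Crossref REST API.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
body = resp.json()

# Crossref wraps the record in a status envelope, as in the JSON above.
assert body["status"] == "ok" and body["message-type"] == "work"
work = body["message"]

# Pull a few fields present in this record: title is an array,
# authors carry given/family names, issued.date-parts holds [[year, ...]].
title = work["title"][0]
authors = ", ".join(
    f"{a.get('given', '')} {a['family']}".strip() for a in work["author"]
)
year = work["issued"]["date-parts"][0][0]

print(f"{authors}: {title} ({year})")
print(f"{work['references-count']} references; pages {work['page']}")

Each entry of work["reference"] is a dict like those above, with a "key" and usually an "unstructured" citation string and a "DOI"; iterating over it with entry.get("DOI") is enough to cross-check venue strings against their DOIs, which is how the CVPR/ICCV mismatches in this record show up.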