{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T22:09:39Z","timestamp":1740175779599,"version":"3.37.3"},"reference-count":48,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,12,19]],"date-time":"2024-12-19T00:00:00Z","timestamp":1734566400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2024,12,19]],"date-time":"2024-12-19T00:00:00Z","timestamp":1734566400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62201458, 61901384, 61231016"],"award-info":[{"award-number":["62201458, 61901384, 61231016"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100007128","name":"Natural Science Foundation of Shaanxi Province","doi-asserted-by":"publisher","award":["2022JQ-577, 2024JC-YBQN-0651"],"award-info":[{"award-number":["2022JQ-577, 2024JC-YBQN-0651"]}],"id":[{"id":"10.13039\/501100007128","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Complex Intell. 
Syst."],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1007\/s40747-024-01696-6","type":"journal-article","created":{"date-parts":[[2024,12,19]],"date-time":"2024-12-19T10:06:12Z","timestamp":1734602772000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Regularizing generalizable neural radiance field with limited-view images"],"prefix":"10.1007","volume":"11","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6705-1326","authenticated-orcid":false,"given":"Wei","family":"Sun","sequence":"first","affiliation":[]},{"given":"Ruijia","family":"Cui","sequence":"additional","affiliation":[]},{"given":"Qianzhou","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xianguang","family":"Kong","sequence":"additional","affiliation":[]},{"given":"Yanning","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,19]]},"reference":[{"key":"1696_CR1","doi-asserted-by":"crossref","unstructured":"Li J, Feng Z, She Q, Ding H, Wang C, Lee GH (2021) Mine: towards continuous depth mpi with nerf for novel view synthesis. In: IEEE international conference on computer vision, pp 12578\u201312588","DOI":"10.1109\/ICCV48922.2021.01235"},{"issue":"3","key":"1696_CR2","doi-asserted-by":"publisher","first-page":"1190","DOI":"10.1109\/TIP.2017.2772858","volume":"27","author":"DM Rahaman","year":"2017","unstructured":"Rahaman DM, Paul M (2017) Virtual view synthesis for free viewpoint video and multiview video compression using gaussian mixture modelling. IEEE Trans Image Process 27(3):1190\u20131201","journal-title":"IEEE Trans Image Process"},{"issue":"2","key":"1696_CR3","doi-asserted-by":"publisher","first-page":"870","DOI":"10.1109\/TIP.2013.2295716","volume":"23","author":"B Ham","year":"2013","unstructured":"Ham B, Min D, Oh C, Do MN, Sohn K (2013) Probability-based rendering for view synthesis. 
IEEE Trans Image Process 23(2):870\u2013884","journal-title":"IEEE Trans Image Process"},{"key":"1696_CR4","doi-asserted-by":"crossref","unstructured":"Gkioxari G, Malik J, Johnson J (2019) Mesh r-cnn. In: IEEE international conference on computer vision, pp 9785\u20139795","DOI":"10.1109\/ICCV.2019.00988"},{"key":"1696_CR5","unstructured":"Qi CR, Su H, Mo K, Guibas LJ (2017) Pointnet: deep learning on point sets for 3d classification and segmentation. In: IEEE conference on computer vision and pattern recognition, pp 652\u2013660"},{"issue":"12","key":"1696_CR6","doi-asserted-by":"publisher","first-page":"2820","DOI":"10.1109\/TPAMI.2018.2868195","volume":"41","author":"B Yang","year":"2018","unstructured":"Yang B, Rosa S, Markham A, Trigoni N, Wen H (2018) Dense 3d object reconstruction from a single depth view. IEEE Trans Pattern Anal Mach Intell 41(12):2820\u20132834","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"1696_CR7","doi-asserted-by":"crossref","unstructured":"Chen Z, Zhang H (2019) Learning implicit fields for generative shape modeling. In: IEEE conference on computer vision and pattern recognition, pp 5939\u20135948","DOI":"10.1109\/CVPR.2019.00609"},{"key":"1696_CR8","doi-asserted-by":"crossref","unstructured":"Mescheder L, Oechsle M, Niemeyer M, Nowozin S, Geiger A (2019) Occupancy networks: learning 3d reconstruction in function space. In: IEEE conference on computer vision and pattern recognition, pp 4460\u20134470","DOI":"10.1109\/CVPR.2019.00459"},{"issue":"1","key":"1696_CR9","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1145\/3503250","volume":"65","author":"B Mildenhall","year":"2021","unstructured":"Mildenhall B, Srinivasan PP, Tancik M, Barron JT, Ramamoorthi R, Ng R (2021) Nerf: representing scenes as neural radiance fields for view synthesis. 
Commun ACM 65(1):99\u2013106","journal-title":"Commun ACM"},{"key":"1696_CR10","doi-asserted-by":"crossref","unstructured":"Peng S, Niemeyer M, Mescheder L, Pollefeys M, Geiger A (2020) Convolutional occupancy networks. In: European conference computer vision, pp 523\u2013540","DOI":"10.1007\/978-3-030-58580-8_31"},{"key":"1696_CR11","doi-asserted-by":"crossref","unstructured":"Barron JT, Mildenhall B, Verbin D, Srinivasan PP, Hedman P (2022) Mip-nerf 360: unbounded anti-aliased neural radiance fields. In: IEEE conference on computer vision and pattern recognition, pp 5470\u20135479","DOI":"10.1109\/CVPR52688.2022.00539"},{"key":"1696_CR12","doi-asserted-by":"crossref","unstructured":"Niemeyer M, Barron JT, Mildenhall B, Sajjadi MS, Geiger A, Radwan N (2022) Regnerf: regularizing neural radiance fields for view synthesis from sparse inputs. In: IEEE conference on computer vision and pattern recognition, pp 5480\u20135490","DOI":"10.1109\/CVPR52688.2022.00540"},{"key":"1696_CR13","doi-asserted-by":"crossref","unstructured":"Wang J, Wang P, Long X, Theobalt C, Komura T, Liu L, Wang W (2022) Neuris: neural reconstruction of indoor scenes using normal priors. In: European conference on computer vision, pp 139\u2013155","DOI":"10.1007\/978-3-031-19824-3_9"},{"key":"1696_CR14","first-page":"4325","volume":"38","author":"Z Ni","year":"2024","unstructured":"Ni Z, Yang P, Yang W, Wang H, Ma L, Kwong S (2024) Colnerf: collaboration for generalizable sparse input neural radiance field. AAAI Conf Artif Intell 38:4325\u20134333","journal-title":"AAAI Conf Artif Intell"},{"key":"1696_CR15","doi-asserted-by":"crossref","unstructured":"Jensen R, Dahl A, Vogiatzis G, Tola E, Aan\u00e6s H (2014) Large scale multi-view stereopsis evaluation. 
In: IEEE conference on computer vision and pattern recognition, pp 406\u2013413","DOI":"10.1109\/CVPR.2014.59"},{"key":"1696_CR16","doi-asserted-by":"crossref","unstructured":"Wang Q, Wang Z, Genova K, Srinivasan PP, Zhou H, Barron JT, Martin-Brualla R, Snavely N, Funkhouser T (2021) Ibrnet: learning multi-view image-based rendering. In: IEEE conference on computer vision and pattern recognition, pp 4690\u20134699","DOI":"10.1109\/CVPR46437.2021.00466"},{"key":"1696_CR17","doi-asserted-by":"crossref","unstructured":"Chen A, Xu Z, Zhao F, Zhang X, Xiang F, Yu J, Su H (2021) Mvsnerf: fast generalizable radiance field reconstruction from multi-view stereo. In: IEEE international conference on computer vision, pp 14124\u201314133","DOI":"10.1109\/ICCV48922.2021.01386"},{"key":"1696_CR18","doi-asserted-by":"crossref","unstructured":"Buehler C, Bosse M, McMillan L, Gortler S, Cohen M (2023) Unstructured lumigraph rendering. In: Seminal graphics papers: pushing the boundaries, pp 497\u2013504","DOI":"10.1145\/3596711.3596764"},{"key":"1696_CR19","doi-asserted-by":"crossref","unstructured":"Heigl B, Koch R, Pollefeys M, Denzler J, Van Gool L (1999) Plenoptic modeling and rendering from image sequences taken by a hand-held camera. In: Mustererkennung, pp 94\u2013101","DOI":"10.1007\/978-3-642-60243-6_11"},{"issue":"6","key":"1696_CR20","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2980179.2980251","volume":"35","author":"NK Kalantari","year":"2016","unstructured":"Kalantari NK, Wang T-C, Ramamoorthi R (2016) Learning-based view synthesis for light field cameras. ACM Trans Gr 35(6):1\u201310","journal-title":"ACM Trans Gr"},{"issue":"6","key":"1696_CR21","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3272127.3275084","volume":"37","author":"P Hedman","year":"2018","unstructured":"Hedman P, Philip J, Price T, Frahm J-M, Drettakis G, Brostow G (2018) Deep blending for free-viewpoint image-based rendering. 
ACM Trans Gr 37(6):1\u201315","journal-title":"ACM Trans Gr"},{"key":"1696_CR22","doi-asserted-by":"crossref","unstructured":"Kellnhofer P, Jebe LC, Jones A, Spicer R, Pulli K, Wetzstein G (2021) Neural lumigraph rendering. In: IEEE conference on computer vision and pattern recognition, pp 4287\u20134297","DOI":"10.1109\/CVPR46437.2021.00427"},{"key":"1696_CR23","first-page":"15651","volume":"33","author":"L Liu","year":"2020","unstructured":"Liu L, Gu J, Zaw Lin K, Chua T-S, Theobalt C (2020) Neural sparse voxel fields. Adv Neural Inf Process Syst 33:15651\u201315663","journal-title":"Adv Neural Inf Process Syst"},{"key":"1696_CR24","doi-asserted-by":"crossref","unstructured":"Liu S, Zhang Y, Peng S, Shi B, Pollefeys M, Cui Z (2020) Dist: rendering deep implicit signed distance function with differentiable sphere tracing. In: IEEE conference on computer vision and pattern recognition, pp 2019\u20132028","DOI":"10.1109\/CVPR42600.2020.00209"},{"issue":"6","key":"1696_CR25","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/2980179.2982420","volume":"35","author":"P Hedman","year":"2016","unstructured":"Hedman P, Ritschel T, Drettakis G, Brostow G (2016) Scalable inside-out image-based rendering. ACM Trans Gr 35(6):1\u201311","journal-title":"ACM Trans Gr"},{"key":"1696_CR26","doi-asserted-by":"crossref","unstructured":"Riegler G, Koltun V (2020) Free view synthesis. In: European conference computer vision, pp 623\u2013640","DOI":"10.1007\/978-3-030-58529-7_37"},{"issue":"6","key":"1696_CR27","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3130800.3130855","volume":"36","author":"E Penner","year":"2017","unstructured":"Penner E, Zhang L (2017) Soft 3d reconstruction for view synthesis. ACM Trans Gr 36(6):1\u201311","journal-title":"ACM Trans Gr"},{"key":"1696_CR28","doi-asserted-by":"crossref","unstructured":"Jancosek M, Pajdla T (2011) Multi-view reconstruction preserving weakly-supported surfaces. 
In: IEEE conference on computer vision and pattern recognition, pp 3121\u20133128","DOI":"10.1109\/CVPR.2011.5995693"},{"key":"1696_CR29","doi-asserted-by":"crossref","unstructured":"Schonberger JL, Frahm J-M (2016) Structure-from-motion revisited. In: IEEE conference on computer vision and pattern recognition, pp 4104\u20134113","DOI":"10.1109\/CVPR.2016.445"},{"key":"1696_CR30","doi-asserted-by":"crossref","unstructured":"Choy CB, Xu D, Gwak J, Chen K, Savarese S (2016) 3d-r2n2: a unified approach for single and multi-view 3d object reconstruction. In: European conference computer vision, pp 628\u2013644","DOI":"10.1007\/978-3-319-46484-8_38"},{"key":"1696_CR31","doi-asserted-by":"crossref","unstructured":"Park JJ, Florence P, Straub J, Newcombe R, Lovegrove S (2019) Deepsdf: learning continuous signed distance functions for shape representation. In: IEEE conference on computer vision and pattern recognition, pp 165\u2013174","DOI":"10.1109\/CVPR.2019.00025"},{"key":"1696_CR32","doi-asserted-by":"crossref","unstructured":"Jiang Y, Ji D, Han Z, Zwicker M (2020) Sdfdiff: differentiable rendering of signed distance fields for 3d shape optimization. In: IEEE conference on computer vision and pattern recognition, pp 1251\u20131261","DOI":"10.1109\/CVPR42600.2020.00133"},{"key":"1696_CR33","doi-asserted-by":"crossref","unstructured":"Yu A, Ye V, Tancik M, Kanazawa A (2021) Pixelnerf: neural radiance fields from one or few images. In: IEEE conference on computer vision and pattern recognition, pp 4578\u20134587","DOI":"10.1109\/CVPR46437.2021.00455"},{"key":"1696_CR34","doi-asserted-by":"crossref","unstructured":"Jain A, Tancik M, Abbeel P (2021) Putting nerf on a diet: semantically consistent few-shot view synthesis. 
In: IEEE international conference on computer vision, pp 5885\u20135894","DOI":"10.1109\/ICCV48922.2021.00583"},{"key":"1696_CR35","doi-asserted-by":"crossref","unstructured":"Deng K, Liu A, Zhu J-Y, Ramanan D (2022) Depth-supervised nerf: fewer views and faster training for free. In: IEEE conference on computer vision and pattern recognition, pp 12882\u201312891","DOI":"10.1109\/CVPR52688.2022.01254"},{"key":"1696_CR36","doi-asserted-by":"crossref","unstructured":"Chibane J, Bansal A, Lazova V, Pons-Moll G (2021) Stereo radiance fields (srf): learning view synthesis for sparse views of novel scenes. In: IEEE conference on computer vision and pattern recognition, pp 7911\u20137920","DOI":"10.1109\/CVPR46437.2021.00782"},{"key":"1696_CR37","doi-asserted-by":"crossref","unstructured":"Yao Y, Luo Z, Li S, Fang T, Quan L (2018) Mvsnet: depth inference for unstructured multi-view stereo. In: European conference on computer vision, pp 767\u2013783","DOI":"10.1007\/978-3-030-01237-3_47"},{"issue":"12","key":"1696_CR38","doi-asserted-by":"publisher","first-page":"2663","DOI":"10.1109\/TMI.2018.2845918","volume":"37","author":"X Li","year":"2018","unstructured":"Li X, Chen H, Qi X, Dou Q, Fu C-W, Heng P-A (2018) H-denseunet: hybrid densely connected unet for liver and tumor segmentation from ct volumes. IEEE Trans Med Imaging 37(12):2663\u20132674","journal-title":"IEEE Trans Med Imaging"},{"key":"1696_CR39","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A, Weissenborn D, Zhai X, Unterthiner T, Dehghani M, Minderer M, Heigold G, Gelly S, et\u00a0al An image is worth 16x16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929"},{"issue":"4","key":"1696_CR40","doi-asserted-by":"publisher","first-page":"3989","DOI":"10.1007\/s40747-022-00944-x","volume":"9","author":"W Sun","year":"2023","unstructured":"Sun W, Kong X, Zhang Y (2023) Attention-guided video super-resolution with recurrent multi-scale spatial-temporal transformer. 
Complex Intell Syst 9(4):3989\u20134002","journal-title":"Complex Intell Syst"},{"key":"1696_CR41","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho J, Jain A, Abbeel P (2020) Denoising diffusion probabilistic models. Adv Neural Inf Process Syst 33:6840\u20136851","journal-title":"Adv Neural Inf Process Syst"},{"key":"1696_CR42","unstructured":"Song Y, Garg S, Shi J, Ermon S (2020) Sliced score matching: a scalable approach to density and score estimation. In: Uncertainty in artificial intelligence, pp 574\u2013584"},{"key":"1696_CR43","doi-asserted-by":"crossref","unstructured":"Niemeyer M, Mescheder L, Oechsle M, Geiger A (2020) Differentiable volumetric rendering: learning implicit 3d representations without 3d supervision. In: IEEE conference on computer vision and pattern recognition, pp 3504\u20133515","DOI":"10.1109\/CVPR42600.2020.00356"},{"issue":"4","key":"1696_CR44","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3306346.3322980","volume":"38","author":"B Mildenhall","year":"2019","unstructured":"Mildenhall B, Srinivasan PP, Ortiz-Cayon R, Kalantari NK, Ramamoorthi R, Ng R, Kar A (2019) Local light field fusion: practical view synthesis with prescriptive sampling guidelines. ACM Trans Gr 38(4):1\u201314","journal-title":"ACM Trans Gr"},{"key":"1696_CR45","doi-asserted-by":"crossref","unstructured":"Zhang R, Isola P, Efros AA, Shechtman E, Wang O (2018) The unreasonable effectiveness of deep features as a perceptual metric. In: IEEE conference on computer vision and pattern recognition, pp 586\u2013595","DOI":"10.1109\/CVPR.2018.00068"},{"key":"1696_CR46","doi-asserted-by":"crossref","unstructured":"Barron JT, Mildenhall B, Tancik M, Hedman P, Martin-Brualla R, Srinivasan PP (2021) Mip-nerf: a multiscale representation for anti-aliasing neural radiance fields. 
In: IEEE international conference on computer vision, pp 5855\u20135864","DOI":"10.1109\/ICCV48922.2021.00580"},{"key":"1696_CR47","doi-asserted-by":"crossref","unstructured":"Wynn J, Turmukhambetov D (2023) Diffusionerf: regularizing neural radiance fields with denoising diffusion models. In: IEEE conference on computer vision and pattern recognition, pp 4180\u20134189","DOI":"10.1109\/CVPR52729.2023.00407"},{"key":"1696_CR48","doi-asserted-by":"crossref","unstructured":"Yang J, Pavone M, Wang Y (2023) Freenerf: improving few-shot neural rendering with free frequency regularization. In: IEEE conference on computer vision and pattern recognition, pp 8254\u20138263","DOI":"10.1109\/CVPR52729.2023.00798"}],"container-title":["Complex &amp; Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01696-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s40747-024-01696-6\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01696-6.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,30]],"date-time":"2025-01-30T20:21:29Z","timestamp":1738268489000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s40747-024-01696-6"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,19]]},"references-count":48,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2025,1]]}},"alternative-id":["1696"],"URL":"https:\/\/doi.org\/10.1007\/s40747-024-01696-6","relation":{},"ISSN":["2199-4536","2198-6053"],"issn-type":[{"type":"print","value":"2199-4536"},{"type":"electronic","value":"2198-6053"}],"subject":[],"published":{"date-parts":[[2024,12
,19]]},"assertion":[{"value":"12 December 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"23 November 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 December 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"78"}}