{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,27]],"date-time":"2025-03-27T20:13:14Z","timestamp":1743106394535,"version":"3.40.3"},"publisher-location":"Cham","reference-count":30,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031720581"},{"type":"electronic","value":"9783031720598"}],"license":[{"start":{"date-parts":[[2024,12,30]],"date-time":"2024-12-30T00:00:00Z","timestamp":1735516800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,30]],"date-time":"2024-12-30T00:00:00Z","timestamp":1735516800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72059-8_31","type":"book-chapter","created":{"date-parts":[[2024,12,29]],"date-time":"2024-12-29T14:00:25Z","timestamp":1735480825000},"page":"367-382","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Depth Priors in\u00a0Removal Neural Radiance Fields"],"prefix":"10.1007","author":[{"given":"Zhihao","family":"Guo","sequence":"first","affiliation":[]},{"given":"Peng","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,30]]},"reference":[{"issue":"1","key":"31_CR1","doi-asserted-by":"publisher","first-page":"99","DOI":"10.1145\/3503250","volume":"65","author":"B Mildenhall","year":"2021","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. Commun. ACM 65(1), 99\u2013106 (2021)","journal-title":"Commun. ACM"},{"key":"31_CR2","doi-asserted-by":"publisher","first-page":"102608","DOI":"10.1016\/j.rcim.2023.102608","volume":"85","author":"S Wang","year":"2024","unstructured":"Wang, S., Zhang, J., Wang, P., Law, J., Calinescu, R., Mihaylova, L.: A deep learning-enhanced digital twin framework for improving safety and reliability in human-robot collaborative manufacturing. Robot. Comput.-Integr. Manuf. 85, 102608 (2024)","journal-title":"Robot. Comput.-Integr. Manuf."},{"key":"31_CR3","doi-asserted-by":"crossref","unstructured":"Yang, B., et al.: Learning object-compositional neural radiance field for editable scene rendering. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 13779\u201313788 (2021)","DOI":"10.1109\/ICCV48922.2021.01352"},{"key":"31_CR4","doi-asserted-by":"crossref","unstructured":"Mirzaei, A., et al.: Spin-NeRF: multiview segmentation and perceptual inpainting with neural radiance fields. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 20669\u201320679 (2023)","DOI":"10.1109\/CVPR52729.2023.01980"},{"key":"31_CR5","doi-asserted-by":"crossref","unstructured":"Weder, S., et al.: Removing objects from neural radiance fields. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 16528\u201316538 (2023)","DOI":"10.1109\/CVPR52729.2023.01586"},{"key":"31_CR6","doi-asserted-by":"crossref","unstructured":"Deng, K., Liu, A., Zhu, J.-Y., Ramanan, D.: Depth-supervised NeRF: fewer views and faster training for free. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12882\u201312891 (2022)","DOI":"10.1109\/CVPR52688.2022.01254"},{"key":"31_CR7","unstructured":"Bhat, S.F., Birkl, R., Wofk, D., Wonka, P., M\u00fcller, M.: ZoeDepth: zero-shot transfer by combining relative and metric depth (2023)"},{"key":"31_CR8","doi-asserted-by":"crossref","unstructured":"Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? The KITTI vision benchmark suite. In: 2012 IEEE Conference on Computer Vision and Pattern Recognition, pp. 3354\u20133361. IEEE (2012)","DOI":"10.1109\/CVPR.2012.6248074"},{"key":"31_CR9","unstructured":"Riedmiller, M., Lernen, A.: Multi layer perceptron. Machine Learning Lab Special Lecture, University of Freiburg, p. 24 (2014)"},{"key":"31_CR10","doi-asserted-by":"crossref","unstructured":"Wei, Y., Liu, S., Rao, Y., Zhao, W., Lu, J., Zhou, J.: NerfingMVS: guided optimization of neural radiance fields for indoor multi-view stereo. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 5610\u20135619 (2021)","DOI":"10.1109\/ICCV48922.2021.00556"},{"key":"31_CR11","doi-asserted-by":"crossref","unstructured":"Neff, T., et al.: DoNeRF: towards real-time rendering of compact neural radiance fields using depth oracle networks. In: Computer Graphics Forum, vol. 40, pp. 45\u201359. Wiley Online Library (2021)","DOI":"10.1111\/cgf.14340"},{"key":"31_CR12","doi-asserted-by":"crossref","unstructured":"Roessle, B., Barron, J.T., Mildenhall, B., Srinivasan, P.P., Nie\u00dfner, M.: Dense depth priors for neural radiance fields from sparse input views. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12892\u201312901 (2022)","DOI":"10.1109\/CVPR52688.2022.01255"},{"key":"31_CR13","doi-asserted-by":"crossref","unstructured":"Wu, Q., et al.: Object-compositional neural implicit surfaces. In: European Conference on Computer Vision, pp. 197\u2013213. Springer (2022)","DOI":"10.1007\/978-3-031-19812-0_12"},{"key":"31_CR14","doi-asserted-by":"crossref","unstructured":"Suvorov, R., et al.: Resolution-robust large mask inpainting with Fourier convolutions. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 2149\u20132159 (2022)","DOI":"10.1109\/WACV51458.2022.00323"},{"key":"31_CR15","doi-asserted-by":"crossref","unstructured":"Sch\u00f6nberger, J.L., Frahm, J.-M.: Structure-from-motion revisited. In: Conference on Computer Vision and Pattern Recognition (CVPR) (2016)","DOI":"10.1109\/CVPR.2016.445"},{"key":"31_CR16","doi-asserted-by":"crossref","unstructured":"Sch\u00f6nberger, J.L., Zheng, E., Frahm, J.-M., Pollefeys, M.: Pixelwise view selection for unstructured multi-view stereo. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) Computer Vision \u2013 ECCV 2016, pp. 501\u2013518. Springer, Cham (2016)","DOI":"10.1007\/978-3-319-46487-9_31"},{"key":"31_CR17","doi-asserted-by":"crossref","unstructured":"Triggs, B., McLauchlan, P.F., Hartley, R.I., Fitzgibbon, A.W.: Bundle adjustment\u2014a modern synthesis. In: Vision Algorithms: Theory and Practice: International Workshop on Vision Algorithms Corfu, Greece, 21\u201322 September 1999, pp. 298\u2013372. Springer (2000)","DOI":"10.1007\/3-540-44480-7_21"},{"key":"31_CR18","doi-asserted-by":"publisher","first-page":"151","DOI":"10.1007\/s11263-006-0031-y","volume":"75","author":"D Hoiem","year":"2007","unstructured":"Hoiem, D., Efros, A.A., Hebert, M.: Recovering surface layout from an image. Int. J. Comput. Vision 75, 151\u2013172 (2007)","journal-title":"Int. J. Comput. Vision"},{"key":"31_CR19","doi-asserted-by":"crossref","unstructured":"Liu, C., Yuen, J., Torralba, A., Sivic, J., Freeman, W.T.: Sift flow: dense correspondence across different scenes. In: Computer Vision\u2013ECCV 2008: 10th European Conference on Computer Vision, Marseille, France, 12\u201318 October 2008, Part III, pp. 28\u201342. Springer (2008)","DOI":"10.1007\/978-3-540-88690-7_3"},{"key":"31_CR20","doi-asserted-by":"crossref","unstructured":"Patni, S., Agarwal, A., Arora, C.: EcoDepth: effective conditioning of diffusion models for monocular depth estimation (2024)","DOI":"10.1109\/CVPR52733.2024.02672"},{"key":"31_CR21","doi-asserted-by":"crossref","unstructured":"Yang, L., Kang, B., Huang, Z., Xu, X., Feng, J., Zhao, H.: Depth anything: unleashing the power of large-scale unlabeled data (2024)","DOI":"10.1109\/CVPR52733.2024.00987"},{"key":"31_CR22","unstructured":"Bhat, S.F., Alhashim, I., Wonka, P.: AdaBins: depth estimation using adaptive bins. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4009\u20134018 (2021)"},{"key":"31_CR23","doi-asserted-by":"crossref","unstructured":"Gasperini, S., Morbitzer, N., Jung, H., Navab, N., Tombari, F.: Robust monocular depth estimation under challenging conditions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 8177\u20138186 (2023)","DOI":"10.1109\/ICCV51070.2023.00751"},{"issue":"3","key":"31_CR24","doi-asserted-by":"publisher","first-page":"1623","DOI":"10.1109\/TPAMI.2020.3019967","volume":"44","author":"R Ranftl","year":"2020","unstructured":"Ranftl, R., Lasinger, K., Hafner, D., Schindler, K., Koltun, V.: Towards robust monocular depth estimation: mixing datasets for zero-shot cross-dataset transfer. IEEE Trans. Pattern Anal. Mach. Intell. 44(3), 1623\u20131637 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"31_CR25","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"31_CR26","doi-asserted-by":"crossref","unstructured":"Takagi, Y., Nishimoto, S.: High-resolution image reconstruction with latent diffusion models from human brain activity. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14453\u201314463 (2023)","DOI":"10.1109\/CVPR52729.2023.01389"},{"key":"31_CR27","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"31_CR28","doi-asserted-by":"crossref","unstructured":"Godard, C., Aodha, O.M., Firman, M., Brostow, G.J.: Digging into self-supervised monocular depth estimation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) (2019)","DOI":"10.1109\/ICCV.2019.00393"},{"issue":"4","key":"31_CR29","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. Image Process."},{"key":"31_CR30","doi-asserted-by":"crossref","unstructured":"Lin, Y., Wang, P., Wang, Z., Ali, S., Mihaylova, L.: Towards automated remote sizing and hot steel manufacturing with image registration and fusion. J. Intell. Manuf. 1\u201318 (2023)","DOI":"10.1007\/s10845-023-02251-9"}],"container-title":["Lecture Notes in Computer Science","Towards Autonomous Robotic Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72059-8_31","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,29]],"date-time":"2024-12-29T14:04:28Z","timestamp":1735481068000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72059-8_31"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,30]]},"ISBN":["9783031720581","9783031720598"],"references-count":30,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72059-8_31","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,12,30]]},"assertion":[{"value":"30 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"TAROS","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Annual Conference Towards Autonomous Robotic Systems","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"London","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"United Kingdom","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 September 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"taros2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/taros-conference.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}