{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,18]],"date-time":"2025-12-18T14:28:48Z","timestamp":1766068128288,"version":"3.40.3"},"publisher-location":"Cham","reference-count":79,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031734632"},{"type":"electronic","value":"9783031734649"}],"license":[{"start":{"date-parts":[[2024,12,4]],"date-time":"2024-12-04T00:00:00Z","timestamp":1733270400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,4]],"date-time":"2024-12-04T00:00:00Z","timestamp":1733270400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73464-9_8","type":"book-chapter","created":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T09:37:32Z","timestamp":1733218652000},"page":"121-138","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["DoubleTake: Geometry Guided Depth Estimation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-4074-3314","authenticated-orcid":false,"given":"Mohamed","family":"Sayed","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8911-3241","authenticated-orcid":false,"given":"Filippo","family":"Aleotti","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7461-5663","authenticated-orcid":false,"given":"Jamie","family":"Watson","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0003-9917-4827","authenticated-orcid":false,"given":"Zawar","family":"Qureshi","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3215-7857","authenticated-orcid":false,"given":"Guillermo","family":"Garcia-Hernando","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8472-3828","authenticated-orcid":false,"given":"Gabriel","family":"Brostow","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0001-3108-5030","authenticated-orcid":false,"given":"Sara","family":"Vicente","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0833-2021","authenticated-orcid":false,"given":"Michael","family":"Firman","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,4]]},"reference":[{"key":"8_CR1","unstructured":"Apple: ARKit (2023). https:\/\/developer.apple.com\/documentation\/arkit. Accessed 5 Oct 2023"},{"key":"8_CR2","unstructured":"Bozic, A., Palafox, P., Thies, J., Dai, A., Nie\u00dfner, M.: TransformerFusion: monocular RGB scene reconstruction using transformers. In: NeurIPS (2021)"},{"key":"8_CR3","doi-asserted-by":"crossref","unstructured":"Cai, C., Ji, P., Yan, Q., Xu, Y.: RIAV-MVS: recurrent-indexing an asymmetric volume for multi-view stereo. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00095"},{"key":"8_CR4","doi-asserted-by":"crossref","unstructured":"Casser, V., Pirk, S., Mahjourian, R., Angelova, A.: Depth prediction without the sensors: leveraging structure for unsupervised learning from monocular videos. 
In: AAAI (2019)","DOI":"10.1609\/aaai.v33i01.33018001"},{"key":"8_CR5","doi-asserted-by":"crossref","unstructured":"Chang, J.R., Chen, Y.S.: Pyramid stereo matching network. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00567"},{"key":"8_CR6","doi-asserted-by":"crossref","unstructured":"Chen, Y., Schmid, C., Sminchisescu, C.: Self-supervised learning with geometric constraints in monocular video: connecting flow, depth, and camera. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00716"},{"key":"8_CR7","unstructured":"Cheng, X., Wang, P., Yang, R.: Learning depth with convolutional spatial propagation network. PAMI (2019)"},{"key":"8_CR8","doi-asserted-by":"crossref","unstructured":"Cheng, Z., Yang, J., Li, H.: Stereo matching in time: 100+ FPS video stereo matching for extended reality. In: WACV (2023)","DOI":"10.1109\/WACV57701.2024.00852"},{"key":"8_CR9","doi-asserted-by":"crossref","unstructured":"Choe, J., Joo, K., Imtiaz, T., Kweon, I.S.: Volumetric propagation network: stereo-lidar fusion for long-range depth estimation. IEEE Robot. Autom. Lett. (2021)","DOI":"10.1109\/LRA.2021.3068712"},{"key":"8_CR10","doi-asserted-by":"crossref","unstructured":"Collins, R.T.: A space-sweep approach to true multi-image matching. In: CVPR (1996)","DOI":"10.1109\/CVPR.1996.517097"},{"key":"8_CR11","doi-asserted-by":"crossref","unstructured":"Conti, A., Poggi, M., Mattoccia, S.: Sparsity agnostic depth completion. In: WACV (2023)","DOI":"10.1109\/WACV56688.2023.00582"},{"key":"8_CR12","doi-asserted-by":"crossref","unstructured":"Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nie\u00dfner, M.: ScanNet: richly-annotated 3D reconstructions of indoor scenes. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.261"},{"key":"8_CR13","doi-asserted-by":"crossref","unstructured":"Deng, K., Liu, A., Zhu, J.Y., Ramanan, D.: Depth-supervised NeRF: fewer views and faster training for free. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01254"},{"key":"8_CR14","doi-asserted-by":"crossref","unstructured":"Du, R., et\u00a0al.: DepthLab: real-time 3D interaction with depth maps for mobile augmented reality. In: ACM Symposium on User Interface Software and Technology (2020)","DOI":"10.1145\/3379337.3415881"},{"key":"8_CR15","doi-asserted-by":"crossref","unstructured":"Duzceker, A., Galliani, S., Vogel, C., Speciale, P., Dusmanu, M., Pollefeys, M.: DeepVideoMVS: multi-view stereo on video with recurrent spatio-temporal fusion. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01507"},{"key":"8_CR16","unstructured":"Fu, Q., Xu, Q., Ong, Y.S., Tao, W.: Geo-Neus: geometry-consistent neural implicit surfaces learning for multi-view reconstruction. In: NeurIPS (2022)"},{"key":"8_CR17","doi-asserted-by":"crossref","unstructured":"Furukawa, Y., Hern\u00e1ndez, C.: Multi-view stereo: a tutorial, foundations and trends\u00ae in computer graphics and vision (2015)","DOI":"10.1561\/9781601988379"},{"key":"8_CR18","doi-asserted-by":"crossref","unstructured":"Gao, H., Mao, W., Liu, M.: VisFusion: visibility-aware online 3D scene reconstruction from videos. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01661"},{"key":"8_CR19","doi-asserted-by":"crossref","unstructured":"Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? The KITTI vision benchmark suite. In: CVPR (2012)","DOI":"10.1109\/CVPR.2012.6248074"},{"key":"8_CR20","doi-asserted-by":"crossref","unstructured":"Gu, X., Fan, Z., Zhu, S., Dai, Z., Tan, F., Tan, P.: Cascade cost volume for high-resolution multi-view stereo and stereo matching. 
In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00257"},{"key":"8_CR21","doi-asserted-by":"crossref","unstructured":"Gu\u00e9don, A., Lepetit, V.: SuGaR: surface-aligned Gaussian splatting for efficient 3D mesh reconstruction and high-quality mesh rendering. arXiv preprint arXiv:2311.12775 (2023)","DOI":"10.1109\/CVPR52733.2024.00512"},{"key":"8_CR22","doi-asserted-by":"crossref","unstructured":"Guizilini, V., Ambrus, R., Burgard, W., Gaidon, A.: Sparse auxiliary networks for unified monocular depth prediction and completion. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01093"},{"key":"8_CR23","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"8_CR24","doi-asserted-by":"crossref","unstructured":"Hou, Y., Kannala, J., Solin, A.: Multi-view stereo by temporal nonparametric fusion. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00274"},{"key":"8_CR25","doi-asserted-by":"crossref","unstructured":"Huang, P.H., Matzen, K., Kopf, J., Ahuja, N., Huang, J.B.: DeepMVS: learning multi-view stereopsis. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00298"},{"key":"8_CR26","unstructured":"Im, S., Jeon, H.G., Lin, S., Kweon, I.S.: DPSNet: end-to-end deep plane sweep stereo. In: ICLR (2019)"},{"key":"8_CR27","doi-asserted-by":"crossref","unstructured":"Izquierdo, S., Civera, J.: SfM-TTR: using structure from motion for test-time refinement of single-view depth networks. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02056"},{"key":"8_CR28","doi-asserted-by":"crossref","unstructured":"K\u00e4hler, O., Prisacariu, V.A., Ren, C.Y., Sun, X., Torr, P.H.S., Murray, D.W.: Very high frame rate volumetric integration of depth images on mobile device. IEEE Trans. Vis. Comput. Graph. (Proceedings International Symposium on Mixed and Augmented Reality 2015) 22(11) (2015)","DOI":"10.1109\/TVCG.2015.2459891"},{"key":"8_CR29","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"500","DOI":"10.1007\/978-3-319-46484-8_30","volume-title":"Computer Vision \u2013 ECCV 2016","author":"O K\u00e4hler","year":"2016","unstructured":"K\u00e4hler, O., Prisacariu, V.A., Murray, D.W.: Real-time large-scale dense 3D reconstruction with loop closure. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9912, pp. 500\u2013516. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46484-8_30"},{"key":"8_CR30","doi-asserted-by":"crossref","unstructured":"Kendall, A., et al.: End-to-end learning of geometry and context for deep stereo regression. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.17"},{"key":"8_CR31","doi-asserted-by":"crossref","unstructured":"Kerbl, B., Kopanas, G., Leimk\u00fchler, T., Drettakis, G.: 3D Gaussian splatting for real-time radiance field rendering. ACM Trans. Graph. 42(4) (2023)","DOI":"10.1145\/3592433"},{"key":"8_CR32","doi-asserted-by":"crossref","unstructured":"Khan, N., Penner, E., Lanman, D., Xiao, L.: Temporally consistent online depth estimation using point-based fusion. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00880"},{"key":"8_CR33","doi-asserted-by":"crossref","unstructured":"Kulhanek, J., Sattler, T.: Tetra-NeRF: representing neural radiance fields using tetrahedra. 
In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01692"},{"key":"8_CR34","doi-asserted-by":"crossref","unstructured":"Kuznietsov, Y., Proesmans, M., Van\u00a0Gool, L.: CoMoDA: continuous monocular depth adaptation using past experiences. In: WACV (2021)","DOI":"10.1109\/WACV48630.2021.00295"},{"key":"8_CR35","doi-asserted-by":"crossref","unstructured":"Li, Z., et al.: Neuralangelo: high-fidelity neural surface reconstruction. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00817"},{"key":"8_CR36","doi-asserted-by":"crossref","unstructured":"Lipson, L., Teed, Z., Deng, J.: Raft-stereo: multilevel recurrent field transforms for stereo matching. In: 3DV (2021)","DOI":"10.1109\/3DV53792.2021.00032"},{"key":"8_CR37","doi-asserted-by":"crossref","unstructured":"Lorensen, W.E., Cline, H.E.: Marching cubes: a high resolution 3D surface construction algorithm. In: Seminal Graphics: Pioneering Efforts that Shaped the Field (1998)","DOI":"10.1145\/280811.281026"},{"key":"8_CR38","doi-asserted-by":"crossref","unstructured":"Luo, X., Huang, J.B., Szeliski, R., Matzen, K., Kopf, J.: Consistent video depth estimation. In: ACM SIGGRAPH (2020)","DOI":"10.1145\/3386569.3392377"},{"key":"8_CR39","doi-asserted-by":"crossref","unstructured":"Ma, F., Cavalheiro, G.V., Karaman, S.: Self-supervised sparse-to-dense: self-supervised depth completion from lidar and monocular camera. In: ICRA (2019)","DOI":"10.1109\/ICRA.2019.8793637"},{"key":"8_CR40","doi-asserted-by":"crossref","unstructured":"Ma, F., Karaman, S.: Sparse-to-dense: depth prediction from sparse depth samples and a single image. In: ICRA (2018)","DOI":"10.1109\/ICRA.2018.8460184"},{"key":"8_CR41","doi-asserted-by":"crossref","unstructured":"Ma, Z., Teed, Z., Deng, J.: Multiview stereo with cascaded epipolar RAFT. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19821-2_42"},{"key":"8_CR42","unstructured":"McCraith, R., Neumann, L., Zisserman, A., Vedaldi, A.: Monocular depth estimation with self-supervised instance adaptation. arXiv:2004.05821 (2020)"},{"key":"8_CR43","doi-asserted-by":"crossref","unstructured":"Menze, M., Geiger, A.: Object scene flow for autonomous vehicles. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7298925"},{"key":"8_CR44","doi-asserted-by":"crossref","unstructured":"Mildenhall, B., Srinivasan, P.P., Tancik, M., Barron, J.T., Ramamoorthi, R., Ng, R.: NeRF: representing scenes as neural radiance fields for view synthesis. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58452-8_24"},{"key":"8_CR45","doi-asserted-by":"crossref","unstructured":"Murez, Z., van As, T., Bartolozzi, J., Sinha, A., Badrinarayanan, V., Rabinovich, A.: Atlas: end-to-end 3D scene reconstruction from posed images. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58571-6_25"},{"key":"8_CR46","doi-asserted-by":"crossref","unstructured":"Peng, S., Niemeyer, M., Mescheder, L., Pollefeys, M., Geiger, A.: Convolutional occupancy networks. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58580-8_31"},{"key":"8_CR47","doi-asserted-by":"crossref","unstructured":"Poggi, M., Conti, A., Mattoccia, S.: Multi-view guided multi-view stereo. In: IROS (2022)","DOI":"10.1109\/IROS47612.2022.9982010"},{"key":"8_CR48","doi-asserted-by":"crossref","unstructured":"Rakotosaona, M.J., Manhardt, F., Arroyo, D.M., Niemeyer, M., Kundu, A., Tombari, F.: NeRFMeshing: distilling neural radiance fields into geometrically-accurate 3D meshes. In: 3DV (2023)","DOI":"10.1109\/3DV62453.2024.00093"},{"key":"8_CR49","unstructured":"Ravi, N., et al.: Accelerating 3D deep learning with PyTorch3D. 
arXiv:2007.08501 (2020)"},{"key":"8_CR50","doi-asserted-by":"crossref","unstructured":"Rich, A., Stier, N., Sen, P., H\u00f6llerer, T.: 3DVNet: multi-view depth prediction and volumetric refinement. In: 3DV (2021)","DOI":"10.1109\/3DV53792.2021.00079"},{"key":"8_CR51","doi-asserted-by":"crossref","unstructured":"Roessle, B., Barron, J.T., Mildenhall, B., Srinivasan, P.P., Nie\u00dfner, M.: Dense depth priors for neural radiance fields from sparse input views. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01255"},{"key":"8_CR52","doi-asserted-by":"crossref","unstructured":"Sayed, M., Gibson, J., Watson, J., Prisacariu, V., Firman, M., Godard, C.: SimpleRecon: 3D reconstruction without 3D convolutions. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19827-4_1"},{"key":"8_CR53","doi-asserted-by":"crossref","unstructured":"Sch\u00f6nberger, J.L., Zheng, E., Frahm, J.M., Pollefeys, M.: Pixelwise view selection for unstructured multi-view stereo. In: ECCV (2016)","DOI":"10.1007\/978-3-319-46487-9_31"},{"key":"8_CR54","doi-asserted-by":"crossref","unstructured":"Sch\u00f6nberger, J.L., Frahm, J.M.: Structure-from-motion revisited. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.445"},{"key":"8_CR55","doi-asserted-by":"crossref","unstructured":"Shotton, J., Glocker, B., Zach, C., Izadi, S., Criminisi, A., Fitzgibbon, A.: Scene coordinate regression forests for camera relocalization in RGB-D images. In: CVPR (2013)","DOI":"10.1109\/CVPR.2013.377"},{"key":"8_CR56","doi-asserted-by":"crossref","unstructured":"Shu, C., Yu, K., Duan, Z., Yang, K.: Feature-metric loss for self-supervised learning of depth and egomotion. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58529-7_34"},{"key":"8_CR57","doi-asserted-by":"crossref","unstructured":"Sinha, A., Murez, Z., Bartolozzi, J., Badrinarayanan, V., Rabinovich, A.: DELTAS: depth estimation by learning triangulation and densification of sparse points. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58589-1_7"},{"key":"8_CR58","doi-asserted-by":"crossref","unstructured":"Song, S., Truong, K.G., Kim, D., Jo, S.: Prior depth-based multi-view stereo network for online 3D model reconstruction. Pattern Recogn. (2023)","DOI":"10.1016\/j.patcog.2022.109198"},{"key":"8_CR59","doi-asserted-by":"crossref","unstructured":"Stier, N., et al.: Finerecon: depth-aware feed-forward network for detailed 3D reconstruction. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01689"},{"key":"8_CR60","doi-asserted-by":"crossref","unstructured":"Stier, N., Rich, A., Sen, P., H\u00f6llerer, T.: VoRTX: volumetric 3D reconstruction with transformers for voxelwise view selection and fusion. In: 3DV (2021)","DOI":"10.1109\/3DV53792.2021.00042"},{"key":"8_CR61","doi-asserted-by":"crossref","unstructured":"Sun, J., Xie, Y., Chen, L., Zhou, X., Bao, H.: NeuralRecon: real-time coherent 3D reconstruction from monocular video. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01534"},{"key":"8_CR62","unstructured":"Tan, M., Le, Q.: Efficientnetv2: smaller models and faster training. In: ICML (2021)"},{"key":"8_CR63","doi-asserted-by":"crossref","unstructured":"Uhrig, J., Schneider, N., Schneider, L., Franke, U., Brox, T., Geiger, A.: Sparsity invariant CNNs. In: 3DV (2017)","DOI":"10.1109\/3DV.2017.00012"},{"key":"8_CR64","doi-asserted-by":"crossref","unstructured":"Uy, M.A., Martin-Brualla, R., Guibas, L., Li, K.: SCADE: NeRFs from space carving with ambiguity-aware depth estimates. 
In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01585"},{"key":"8_CR65","doi-asserted-by":"crossref","unstructured":"Valentin, J., et\u00a0al.: Depth from motion for smartphone AR. Trans. Graph. (2018)","DOI":"10.1145\/3272127.3275041"},{"key":"8_CR66","doi-asserted-by":"crossref","unstructured":"Wald, J., Avetisyan, A., Navab, N., Tombari, F., Niessner, M.: RIO: 3D object instance re-localization in changing indoor environments. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00775"},{"key":"8_CR67","doi-asserted-by":"crossref","unstructured":"Wang, K., Shen, S.: MVDepthNet: real-time multiview depth estimation neural network. In: 3DV (2018)","DOI":"10.1109\/3DV.2018.00037"},{"key":"8_CR68","unstructured":"Chen, H., Yang, H., Zhang, Y.: Depth completion using geometry-aware embedding. In: ICRA (2022)"},{"key":"8_CR69","doi-asserted-by":"crossref","unstructured":"Wong, A., Soatto, S.: Unsupervised depth completion with calibrated backprojection layers. In: ICCV (2021)","DOI":"10.1109\/ICCV48922.2021.01251"},{"key":"8_CR70","doi-asserted-by":"crossref","unstructured":"Xin, Y., Zuo, X., Lu, D., Leutenegger, S.: SimpleMapping: real-time visual-inertial dense mapping with deep multi-view stereo. In: ISMAR (2023)","DOI":"10.1109\/ISMAR59233.2023.00042"},{"key":"8_CR71","doi-asserted-by":"crossref","unstructured":"Yang, J., Mao, W., Alvarez, J.M., Liu, M.: Cost volume pyramid based depth inference for multi-view stereo. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00493"},{"key":"8_CR72","doi-asserted-by":"crossref","unstructured":"Yao, Y., Luo, Z., Li, S., Fang, T., Quan, L.: MVSNet: depth inference for unstructured multi-view stereo. In: ECCV (2018)","DOI":"10.1007\/978-3-030-01237-3_47"},{"key":"8_CR73","unstructured":"Yariv, L., Gu, J., Kasten, Y., Lipman, Y.: Volume rendering of neural implicit surfaces. In: NeurIPS (2021)"},{"key":"8_CR74","unstructured":"Yu, Z., Peng, S., Niemeyer, M., Sattler, T., Geiger, A.: MonoSDF: exploring monocular geometric cues for neural implicit surface reconstruction. In: NeurIPS (2022)"},{"key":"8_CR75","doi-asserted-by":"crossref","unstructured":"Zhang, F., Prisacariu, V., Yang, R., Torr, P.H.: GA-Net: guided aggregation net for end-to-end stereo matching. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00027"},{"key":"8_CR76","doi-asserted-by":"crossref","unstructured":"Zhang, F., Qi, X., Yang, R., Prisacariu, V., Wah, B., Torr, P.: Domain-invariant stereo matching networks. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58536-5_25"},{"key":"8_CR77","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Peng, R., Hu, Y., Wang, R.: GeoMVSNet: learning multi-view stereo with geometry perception. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02060"},{"key":"8_CR78","doi-asserted-by":"crossref","unstructured":"Zhou, Z., Rahman\u00a0Siddiquee, M.M., Tajbakhsh, N., Liang, J.: UNet++: a nested U-Net architecture for medical image segmentation. In: Deep learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support (2018)","DOI":"10.1007\/978-3-030-00889-5_1"},{"key":"8_CR79","doi-asserted-by":"crossref","unstructured":"Zuo, X., Yang, N., Merrill, N., Xu, B., Leutenegger, S.: Incremental dense reconstruction from monocular video with guided sparse feature volume fusion. IEEE Robot. Autom. Lett. 
(2023)","DOI":"10.1109\/LRA.2023.3273509"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73464-9_8","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T10:06:29Z","timestamp":1733220389000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73464-9_8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,4]]},"ISBN":["9783031734632","9783031734649"],"references-count":79,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73464-9_8","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,12,4]]},"assertion":[{"value":"4 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}