{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T07:38:54Z","timestamp":1740123534149,"version":"3.37.3"},"reference-count":63,"publisher":"Springer Science and Business Media LLC","issue":"14","license":[{"start":{"date-parts":[[2024,6,5]],"date-time":"2024-06-05T00:00:00Z","timestamp":1717545600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,6,5]],"date-time":"2024-06-05T00:00:00Z","timestamp":1717545600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100022650","name":"Anhui Postdoctoral Science Foundation","doi-asserted-by":"publisher","award":["2022B623"],"award-info":[{"award-number":["2022B623"]}],"id":[{"id":"10.13039\/501100022650","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003995","name":"Natural Science Foundation of Anhui Province","doi-asserted-by":"crossref","award":["2108085QF258"],"award-info":[{"award-number":["2108085QF258"]}],"id":[{"id":"10.13039\/501100003995","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["62102003"],"award-info":[{"award-number":["62102003"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Medical Special Cultivation Project of Anhui University of Science and Technology","award":["YZ2023H2B003"],"award-info":[{"award-number":["YZ2023H2B003"]}]},{"name":"Huainan City Science and Technology Plan Project","award":["2023A316"],"award-info":[{"award-number":["2023A316"]}]},{"name":"University-level general projects of Anhui University of science and 
technology","award":["xjyb2020-04"],"award-info":[{"award-number":["xjyb2020-04"]}]},{"name":"the University Synergy Innovation Program of Anhui Province","award":["GXXT-2021-006","GXXT-2022-038"],"award-info":[{"award-number":["GXXT-2021-006","GXXT-2022-038"]}]},{"name":"Central guiding local technology development special funds","award":["202107d06020001"],"award-info":[{"award-number":["202107d06020001"]}]},{"name":"Funded by Research Foundation of the Institute of Environment-friendly Materials and Occupational Health (Wuhu), Anhui University of Science and Technology","award":["ALW2021YF04"],"award-info":[{"award-number":["ALW2021YF04"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Supercomput"],"published-print":{"date-parts":[[2024,9]]},"DOI":"10.1007\/s11227-024-06205-7","type":"journal-article","created":{"date-parts":[[2024,6,5]],"date-time":"2024-06-05T10:03:38Z","timestamp":1717581818000},"page":"21023-21047","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["EDFIDepth: enriched multi-path vision transformer feature interaction networks for monocular depth 
estimation"],"prefix":"10.1007","volume":"80","author":[{"given":"Chenxing","family":"Xia","sequence":"first","affiliation":[]},{"given":"Mengge","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Xiuju","family":"Gao","sequence":"additional","affiliation":[]},{"given":"Bin","family":"Ge","sequence":"additional","affiliation":[]},{"given":"Kuan-Ching","family":"Li","sequence":"additional","affiliation":[]},{"given":"Xianjin","family":"Fang","sequence":"additional","affiliation":[]},{"given":"Yan","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Xingzhu","family":"Liang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,6,5]]},"reference":[{"issue":"7","key":"6205_CR1","doi-asserted-by":"publisher","first-page":"2200","DOI":"10.1016\/j.patcog.2007.12.014","volume":"41","author":"AS Malik","year":"2008","unstructured":"Malik AS, Choi TS (2008) A novel algorithm for estimation of depth map using image focus for 3D shape recovery in the presence of noise. Pattern Recogn 41(7):2200\u20132225","journal-title":"Pattern Recogn"},{"issue":"5","key":"6205_CR2","doi-asserted-by":"publisher","first-page":"824","DOI":"10.1109\/TPAMI.2008.132","volume":"31","author":"A Saxena","year":"2008","unstructured":"Saxena A, Sun M, Ng AY (2008) Make3d: learning 3d scene structure from a single still image. IEEE Trans Pattern Anal Mach Intell 31(5):824\u2013840","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"6205_CR3","doi-asserted-by":"crossref","unstructured":"Geiger A, Philip L, Raquel U (2012) Are we ready for autonomous driving? the kitti vision benchmark suite. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 3354\u20133361","DOI":"10.1109\/CVPR.2012.6248074"},{"key":"6205_CR4","doi-asserted-by":"crossref","unstructured":"Hoiem D, Stein AN, Efros AA et\u00a0al (2007) Recovering occlusion boundaries from a single image. 
In: Proceedings of the IEEE International Conference on Computer Vision, pp 1\u20138","DOI":"10.1109\/ICCV.2007.4408985"},{"key":"6205_CR5","first-page":"1161","volume":"18","author":"A Saxena","year":"2005","unstructured":"Saxena A, Chung S, Ng A (2005) Learning depth from single monocular images. Adv Neural Inf Process Syst 18:1161\u20131168","journal-title":"Adv Neural Inf Process Syst"},{"key":"6205_CR6","doi-asserted-by":"crossref","unstructured":"Eigen D, Fergus R (2015) Predicting depth, surface normals and semantic labels with a common multi-scale convolutional architecture. In: Proceedings of the IEEE International Conference on Computer Vision, pp 2650\u20132658","DOI":"10.1109\/ICCV.2015.304"},{"key":"6205_CR7","first-page":"2366","volume":"27","author":"D Eigen","year":"2014","unstructured":"Eigen D, Puhrsch C, Fergus R (2014) Depth map prediction from a single image using a multi-scale deep network. Adv Neural Inf Process Syst 27:2366\u20132374","journal-title":"Adv Neural Inf Process Syst"},{"issue":"11","key":"6205_CR8","doi-asserted-by":"publisher","first-page":"4381","DOI":"10.1109\/TCSVT.2021.3049869","volume":"31","author":"M Song","year":"2021","unstructured":"Song M, Lim S, Kim W (2021) Monocular depth estimation using laplacian pyramid-based depth residuals. IEEE Trans Circuits Syst Video Technol 31(11):4381\u20134393","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"6205_CR9","unstructured":"Lee JH, Han MK, Ko DW et\u00a0al (2019) From big to small: Multi-scale local planar guidance for monocular depth estimation. arXiv preprint arXiv:1907.10326"},{"key":"6205_CR10","doi-asserted-by":"crossref","unstructured":"Fu H, Gong M, Wang C et\u00a0al (2018) Deep ordinal regression network for monocular depth estimation. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 2002\u20132011","DOI":"10.1109\/CVPR.2018.00214"},{"key":"6205_CR11","doi-asserted-by":"crossref","unstructured":"Yin W, Liu Y, Shen C et\u00a0al (2019) Enforcing geometric constraints of virtual normal for depth prediction. In: Proceedings of the IEEE International Conference on Computer Vision, pp 5684\u20135693","DOI":"10.1109\/ICCV.2019.00578"},{"issue":"10","key":"6205_CR12","doi-asserted-by":"publisher","first-page":"2024","DOI":"10.1109\/TPAMI.2015.2505283","volume":"38","author":"F Liu","year":"2015","unstructured":"Liu F, Shen C, Lin G et al (2015) Learning depth from single monocular images using deep convolutional neural fields. IEEE Trans Pattern Anal Mach Intell 38(10):2024\u20132039","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"6205_CR13","doi-asserted-by":"crossref","unstructured":"Ranftl R, Bochkovskiy A, Koltun V (2021) Vision transformers for dense prediction. In: Proceedings of the IEEE International Conference on Computer Vision, pp 12179\u201312188","DOI":"10.1109\/ICCV48922.2021.01196"},{"issue":"6","key":"6205_CR14","doi-asserted-by":"publisher","first-page":"837","DOI":"10.1007\/s11633-023-1458-0","volume":"20","author":"Z Li","year":"2023","unstructured":"Li Z, Chen Z, Liu X et al (2023) Depthformer: exploiting long-range correlation and local information for accurate monocular depth estimation. Machine Intelligence Research 20(6):837\u2013854","journal-title":"Machine Intelligence Research"},{"key":"6205_CR15","doi-asserted-by":"crossref","unstructured":"Zhou T, Brown M, Snavely N et\u00a0al (2017) Unsupervised learning of depth and ego-motion from video. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 1851\u20131858","DOI":"10.1109\/CVPR.2017.700"},{"key":"6205_CR16","doi-asserted-by":"crossref","unstructured":"Godard C, Mac\u00a0Aodha O, Firman M et\u00a0al (2019) Digging into self-supervised monocular depth estimation. In: Proceedings of the IEEE International Conference on Computer Vision, pp 3828\u20133838","DOI":"10.1109\/ICCV.2019.00393"},{"key":"6205_CR17","doi-asserted-by":"crossref","unstructured":"Agarwal A, Arora C (2023) Attention attention everywhere: Monocular depth prediction with skip attention. In: Proceedings of the IEEE Winter Conference on Applications of Computer Vision, pp 5861\u20135870","DOI":"10.1109\/WACV56688.2023.00581"},{"key":"6205_CR18","unstructured":"Bhat SF, Birkl R, Wofk D et\u00a0al (2023) Zoedepth: Zero-shot transfer by combining relative and metric depth. arXiv preprint arXiv:2302.12288"},{"key":"6205_CR19","doi-asserted-by":"crossref","unstructured":"Yang G, Tang H, Ding M et\u00a0al (2021) Transformer-based attention networks for continuous pixel-wise prediction. In: Proceedings of the IEEE International Conference on Computer vision, pp 16269\u201316279","DOI":"10.1109\/ICCV48922.2021.01596"},{"key":"6205_CR20","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A et\u00a0al (2020) An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929"},{"key":"6205_CR21","doi-asserted-by":"crossref","unstructured":"Patil V, Sakaridis C, Liniger A et\u00a0al (2022) P3depth: Monocular depth estimation with a piecewise planarity prior. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 1610\u20131621","DOI":"10.1109\/CVPR52688.2022.00166"},{"key":"6205_CR22","unstructured":"Bhat SF, Alhashim I, Wonka P (2021) Adabins: Depth estimation using adaptive bins. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 4009\u20134018"},{"key":"6205_CR23","unstructured":"Kim D, Ga W, Ahn P et\u00a0al (2022) Global-local path networks for monocular depth estimation with vertical cutdepth. arXiv preprint arXiv:2201.07436"},{"key":"6205_CR24","doi-asserted-by":"crossref","unstructured":"Yuan W, Gu X, Dai Z et\u00a0al (2022) Neural window fully-connected crfs for monocular depth estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 3916\u20133925","DOI":"10.1109\/CVPR52688.2022.00389"},{"key":"6205_CR25","doi-asserted-by":"crossref","unstructured":"Shao S, Li R, Pei Z et\u00a0al (2023) Towards comprehensive monocular depth estimation: Multiple heads are better than one. IEEE Transactions on Multimedia pp 7660\u20137671","DOI":"10.1109\/TMM.2022.3224810"},{"key":"6205_CR26","doi-asserted-by":"crossref","unstructured":"Yin W, Zhang C, Chen H et\u00a0al (2023) Metric3d: Towards zero-shot metric 3d prediction from a single image. In: Proceedings of the IEEE International Conference on Computer Vision, pp 9043\u20139053","DOI":"10.1109\/ICCV51070.2023.00830"},{"key":"6205_CR27","doi-asserted-by":"crossref","unstructured":"Wang Y, Li X, Shi M et\u00a0al (2021) Knowledge distillation for fast and accurate monocular depth estimation on mobile devices. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 2457\u20132465","DOI":"10.1109\/CVPRW53098.2021.00278"},{"issue":"1","key":"6205_CR28","doi-asserted-by":"publisher","first-page":"15","DOI":"10.3390\/s21010015","volume":"21","author":"F Aleotti","year":"2020","unstructured":"Aleotti F, Zaccaroni G, Bartolomei L et al (2020) Real-time single image depth perception in the wild with handheld devices. 
Sensors 21(1):15","journal-title":"Sensors"},{"key":"6205_CR29","doi-asserted-by":"publisher","first-page":"1085","DOI":"10.1109\/TMM.2021.3139217","volume":"25","author":"X Liang","year":"2021","unstructured":"Liang X, Tang Z, Wu J et al (2021) Robust image hashing with isomap and saliency map for copy detection. IEEE Trans Multimedia 25:1085\u20131097","journal-title":"IEEE Trans Multimedia"},{"key":"6205_CR30","doi-asserted-by":"crossref","unstructured":"Shim K, Kim J, Lee G et\u00a0al (2023) Depth-relative self attention for monocular depth estimation. In: Proceedings of the International Joint Conference on Artificial Intelligence, pp 1396\u20131404","DOI":"10.24963\/ijcai.2023\/155"},{"key":"6205_CR31","doi-asserted-by":"crossref","unstructured":"Lee Y, Kim J, Willette J et\u00a0al (2022) Mpvit: Multi-path vision transformer for dense prediction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 7287\u20137296","DOI":"10.1109\/CVPR52688.2022.00714"},{"key":"6205_CR32","doi-asserted-by":"crossref","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 7132\u20137141","DOI":"10.1109\/CVPR.2018.00745"},{"key":"6205_CR33","doi-asserted-by":"crossref","unstructured":"Huynh L, Nguyen-Ha P, Matas J et\u00a0al (2020) Guiding monocular depth estimation using depth-attention volume. In: Proceedings of the European Conference on Computer Vision, pp 581\u2013597","DOI":"10.1007\/978-3-030-58574-7_35"},{"key":"6205_CR34","doi-asserted-by":"crossref","unstructured":"Wang L, Zhang J, Wang Y et\u00a0al (2020) Cliffnet for monocular depth estimation with hierarchical embedding loss. 
In: Proceedings of the European Conference on Computer Vision, pp 316\u2013331","DOI":"10.1007\/978-3-030-58558-7_19"},{"key":"6205_CR35","first-page":"5998","volume":"30","author":"A Vaswani","year":"2017","unstructured":"Vaswani A, Shazeer N, Parmar N et al (2017) Attention is all you need. Adv Neural Inf Process Syst 30:5998\u20136008","journal-title":"Adv Neural Inf Process Syst"},{"key":"6205_CR36","doi-asserted-by":"crossref","unstructured":"Liu Z, Lin Y, Cao Y et\u00a0al (2021) Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE International Conference on Computer Vision, pp 10012\u201310022","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"6205_CR37","doi-asserted-by":"publisher","first-page":"12760","DOI":"10.1109\/TPAMI.2022.3202765","volume":"45","author":"YH Wu","year":"2023","unstructured":"Wu YH, Liu Y, Zhan X et al (2023) P2t: pyramid pooling transformer for scene understanding. IEEE Trans Pattern Anal Mach Intell 45:12760\u201312771","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"6205_CR38","first-page":"12077","volume":"34","author":"E Xie","year":"2021","unstructured":"Xie E, Wang W, Yu Z et al (2021) Segformer: simple and efficient design for semantic segmentation with transformers. Adv Neural Inf Process Syst 34:12077\u201312090","journal-title":"Adv Neural Inf Process Syst"},{"key":"6205_CR39","doi-asserted-by":"crossref","unstructured":"Pan X, Ye T, Xia Z et\u00a0al (2023) Slide-transformer: Hierarchical vision transformer with local self-attention. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 2082\u20132091","DOI":"10.1109\/CVPR52729.2023.00207"},{"key":"6205_CR40","first-page":"1","volume":"2","author":"C Xia","year":"2023","unstructured":"Xia C, Chen D, Gao X et al (2023) Mfcinet: multi-level feature and context information fusion network for rgb-d salient object detection. 
J Supercomput 2:1\u201327","journal-title":"J Supercomput"},{"key":"6205_CR41","doi-asserted-by":"crossref","unstructured":"Lu C, de\u00a0Geus D, Dubbelman G (2023) Content-aware token sharing for efficient semantic segmentation with vision transformers. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 23631\u201323640","DOI":"10.1109\/CVPR52729.2023.02263"},{"issue":"6","key":"6205_CR42","doi-asserted-by":"publisher","first-page":"8268","DOI":"10.1007\/s11227-021-04151-2","volume":"78","author":"C Gou","year":"2022","unstructured":"Gou C, Zhou Y, Li D (2022) Driver attention prediction based on convolution and transformers. J Supercomput 78(6):8268\u20138284","journal-title":"J Supercomput"},{"key":"6205_CR43","doi-asserted-by":"publisher","first-page":"1583","DOI":"10.1007\/s13042-020-01251-y","volume":"12","author":"Y Chen","year":"2021","unstructured":"Chen Y, Zhao H, Hu Z et al (2021) Attention-based context aggregation network for monocular depth estimation. Int J Mach Learn Cybern 12:1583\u20131596","journal-title":"Int J Mach Learn Cybern"},{"key":"6205_CR44","doi-asserted-by":"crossref","unstructured":"Laina I, Rupprecht C, Belagiannis V et\u00a0al (2016) Deeper depth prediction with fully convolutional residual networks. In: Proceedings of the Fourth International Conference on 3D Vision, pp 239\u2013248","DOI":"10.1109\/3DV.2016.32"},{"key":"6205_CR45","doi-asserted-by":"crossref","unstructured":"Hu J, Ozay M, Zhang Y et\u00a0al (2019) Revisiting single image depth estimation: Toward higher resolution maps with accurate object boundaries. In: Proceedings of the IEEE Winter Conference on Applications of Computer Vision, pp 1043\u20131051","DOI":"10.1109\/WACV.2019.00116"},{"key":"6205_CR46","doi-asserted-by":"crossref","unstructured":"Ning C, Gan H (2023) Trap attention: Monocular depth estimation with manual traps. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 5033\u20135043","DOI":"10.1109\/CVPR52729.2023.00487"},{"key":"6205_CR47","doi-asserted-by":"crossref","unstructured":"Zhao H, Shi J, Qi X et\u00a0al (2017) Pyramid scene parsing network. In: Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pp 2881\u20132890","DOI":"10.1109\/CVPR.2017.660"},{"key":"6205_CR48","doi-asserted-by":"crossref","unstructured":"Chen X, Lin KY, Wang J et\u00a0al (2020) Bi-directional cross-modality feature propagation with separation-and-aggregation gate for rgb-d semantic segmentation. In: Proceedings of the European Conference on Computer Vision, pp 561\u2013577","DOI":"10.1007\/978-3-030-58621-8_33"},{"issue":"19","key":"6205_CR49","doi-asserted-by":"publisher","first-page":"18762","DOI":"10.1109\/JSEN.2022.3199265","volume":"22","author":"SJ Hwang","year":"2022","unstructured":"Hwang SJ, Park SJ, Baek JH et al (2022) Self-supervised monocular depth estimation using hybrid transformer encoder. IEEE Sens J 22(19):18762\u201318770","journal-title":"IEEE Sens J"},{"key":"6205_CR50","doi-asserted-by":"crossref","unstructured":"Peng C, Zhang X, Yu G et\u00a0al (2017) Large kernel matters\u2013improve semantic segmentation by global convolutional network. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 4353\u20134361","DOI":"10.1109\/CVPR.2017.189"},{"key":"6205_CR51","doi-asserted-by":"crossref","unstructured":"Lee S, Lee J, Kim B et\u00a0al (2021) Patch-wise attention network for monocular depth estimation. In: Proceedings of the AAAI Conference on Artificial Intelligence, pp 1873\u20131881","DOI":"10.1609\/aaai.v35i3.16282"},{"key":"6205_CR52","doi-asserted-by":"crossref","unstructured":"Silberman N, Hoiem D, Kohli P et\u00a0al (2012) Indoor segmentation and support inference from rgbd images. 
In: Proceedings of the European Conference on Computer Vision, pp 746\u2013760","DOI":"10.1007\/978-3-642-33715-4_54"},{"issue":"11","key":"6205_CR53","doi-asserted-by":"publisher","first-page":"1231","DOI":"10.1177\/0278364913491297","volume":"32","author":"A Geiger","year":"2013","unstructured":"Geiger A, Lenz P, Stiller C et al (2013) Vision meets robotics: the kitti dataset. The International Journal of Robotics Research 32(11):1231\u20131237","journal-title":"The International Journal of Robotics Research"},{"key":"6205_CR54","doi-asserted-by":"crossref","unstructured":"Song S, Lichtenberg SP, Xiao J (2015) Sun rgb-d: A rgb-d scene understanding benchmark suite. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 567\u2013576","DOI":"10.1109\/CVPR.2015.7298655"},{"key":"6205_CR55","first-page":"8024","volume":"32","author":"A Paszke","year":"2019","unstructured":"Paszke A, Gross S, Massa F et al (2019) Pytorch: an imperative style, high-performance deep learning library. Adv Neural Inf Process Syst 32:8024\u20138035","journal-title":"Adv Neural Inf Process Syst"},{"key":"6205_CR56","unstructured":"Ishii Y, Yamashita T (2021) Cutdepth: Edge-aware data augmentation in depth estimation. arXiv preprint arXiv:2107.07684"},{"key":"6205_CR57","doi-asserted-by":"crossref","unstructured":"Guizilini V, Ambrus R, Burgard W et\u00a0al (2021) Sparse auxiliary networks for unified monocular depth prediction and completion. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 11078\u201311088","DOI":"10.1109\/CVPR46437.2021.01093"},{"key":"6205_CR58","doi-asserted-by":"crossref","unstructured":"Naderi T, Sadovnik A, Hayward J et\u00a0al (2022) Monocular depth estimation with adaptive geometric attention. 
In: Proceedings of the IEEE Winter Conference on Applications of Computer Vision, pp 944\u2013954","DOI":"10.1109\/WACV51458.2022.00069"},{"key":"6205_CR59","doi-asserted-by":"crossref","unstructured":"Lee M, Hwang S, Park C et\u00a0al (2022) Edgeconv with attention module for monocular depth estimation. In: Proceedings of the IEEE Winter Conference on Applications of Computer Vision, pp 2858\u20132867","DOI":"10.1109\/WACV51458.2022.00242"},{"key":"6205_CR60","doi-asserted-by":"crossref","unstructured":"Kuznietsov Y, Stuckler J, Leibe B (2017) Semi-supervised deep learning for monocular depth map prediction. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 6647\u20136655","DOI":"10.1109\/CVPR.2017.238"},{"key":"6205_CR61","doi-asserted-by":"crossref","unstructured":"Gan Y, Xu X, Sun W et\u00a0al (2018) Monocular depth estimation with affinity, vertical pooling, and label enhancement. In: Proceedings of the European Conference on Computer Vision, pp 224\u2013239","DOI":"10.1007\/978-3-030-01219-9_14"},{"key":"6205_CR62","doi-asserted-by":"crossref","unstructured":"Chen X, Chen X, Zha ZJ (2019) Structure-aware residual pyramid network for monocular depth estimation. In: Proceedings of the International Joint Conference on Artificial Intelligence, pp 694\u2013700","DOI":"10.24963\/ijcai.2019\/98"},{"key":"6205_CR63","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S et\u00a0al (2016) Deep residual learning for image recognition. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"}],"container-title":["The Journal of Supercomputing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11227-024-06205-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11227-024-06205-7\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11227-024-06205-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T14:02:49Z","timestamp":1722607369000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11227-024-06205-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,6,5]]},"references-count":63,"journal-issue":{"issue":"14","published-print":{"date-parts":[[2024,9]]}},"alternative-id":["6205"],"URL":"https:\/\/doi.org\/10.1007\/s11227-024-06205-7","relation":{},"ISSN":["0920-8542","1573-0484"],"issn-type":[{"type":"print","value":"0920-8542"},{"type":"electronic","value":"1573-0484"}],"subject":[],"published":{"date-parts":[[2024,6,5]]},"assertion":[{"value":"8 May 2024","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 June 2024","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that there is no conflict of interest that can have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of 
interest"}}]}}