{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,5]],"date-time":"2026-05-05T01:40:47Z","timestamp":1777945247438,"version":"3.51.4"},"reference-count":21,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,7,1]],"date-time":"2026-07-01T00:00:00Z","timestamp":1782864000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T00:00:00Z","timestamp":1773705600000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100001871","name":"Funda\u00e7\u00e3o para a Ci\u00eancia e a Tecnologia","doi-asserted-by":"publisher","award":["ISR-UC UID\/00048\/2025"],"award-info":[{"award-number":["ISR-UC UID\/00048\/2025"]}],"id":[{"id":"10.13039\/501100001871","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Robotics and Autonomous Systems"],"published-print":{"date-parts":[[2026,7]]},"DOI":"10.1016\/j.robot.2026.105437","type":"journal-article","created":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T16:36:19Z","timestamp":1773765379000},"page":"105437","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["Distilling Apple DepthPro for RGB-LiDAR depth estimation"],"prefix":"10.1016","volume":"201","author":[{"ORCID":"https:\/\/orcid.org\/0009-0005-0138-8451","authenticated-orcid":false,"given":"Manuel","family":"Abreu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3833-3794","authenticated-orcid":false,"given":"Lu\u00eds","family":"Garrote","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7750-5221","authenticated-orcid":false,"given":"Urbano J.","family":"Nunes","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.robot.2026.105437_b1","series-title":"International Conference on Learning Representations","article-title":"Depth Pro: Sharp monocular metric depth in less than a second","author":"Bochkovskii","year":"2025"},{"key":"10.1016\/j.robot.2026.105437_b2","series-title":"Proceedings of the IEEE International Conference on Computer Vision","first-page":"1557","article-title":"Surface normals in the wild","author":"Chen","year":"2017"},{"key":"10.1016\/j.robot.2026.105437_b3","doi-asserted-by":"crossref","DOI":"10.1016\/j.robot.2020.103701","article-title":"On deep learning techniques to boost monocular depth estimation for autonomous navigation","volume":"136","author":"de Queiroz Mendes","year":"2021","journal-title":"Robot. Auton. Syst."},{"issue":"6","key":"10.1016\/j.robot.2026.105437_b4","doi-asserted-by":"crossref","DOI":"10.3390\/s23062919","article-title":"Deep monocular depth estimation based on content and contextual features","volume":"23","author":"Abdulwahab","year":"2023","journal-title":"Sensors"},{"key":"10.1016\/j.robot.2026.105437_b5","doi-asserted-by":"crossref","unstructured":"L. Yang, B. Kang, Z. Huang, X. Xu, J. Feng, H. Zhao, Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data, in: IEEE Conference on Computer Vision and Pattern Recognition, CVPR, 2024, http:\/\/dx.doi.org\/10.1109\/CVPR52733.2024.00987.","DOI":"10.1109\/CVPR52733.2024.00987"},{"key":"10.1016\/j.robot.2026.105437_b6","series-title":"Advances in Neural Information Processing Systems","article-title":"Depth anything V2","author":"Yang","year":"2024"},{"key":"10.1016\/j.robot.2026.105437_b7","series-title":"ZoeDepth: Zero-shot transfer by combining relative and metric depth","author":"Bhat","year":"2023"},{"key":"10.1016\/j.robot.2026.105437_b8","series-title":"2016 IEEE 19th International Conference on Intelligent Transportation Systems","article-title":"High-resolution lidar-based depth mapping using bilateral filter","author":"Premebida","year":"2016"},{"key":"10.1016\/j.robot.2026.105437_b9","doi-asserted-by":"crossref","unstructured":"C. Fu, C. Mertz, J.M. Dolan, LIDAR and Monocular Camera Fusion: On-road Depth Completion for Autonomous Driving, in: 2019 IEEE Intelligent Transportation Systems Conference, ITSC, 2019, http:\/\/dx.doi.org\/10.1109\/ITSC.2019.8917201.","DOI":"10.1109\/ITSC.2019.8917201"},{"key":"10.1016\/j.robot.2026.105437_b10","series-title":"Proceedings of the 17th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications","first-page":"155","article-title":"SemSegDepth: A combined model for semantic segmentation and depth completion","author":"Lagos","year":"2022"},{"key":"10.1016\/j.robot.2026.105437_b11","doi-asserted-by":"crossref","DOI":"10.1016\/j.cviu.2022.103601","article-title":"LiDARTouch: Monocular metric depth estimation with a few-beam LiDAR","volume":"227","author":"Bartoccioni","year":"2023","journal-title":"Comput. Vis. Image Underst."},{"key":"10.1016\/j.robot.2026.105437_b12","doi-asserted-by":"crossref","unstructured":"K. He, X. Zhang, S. Ren, J. Sun, Deep Residual Learning for Image Recognition, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, CVPR, 2016, pp. 770\u2013778, http:\/\/dx.doi.org\/10.1109\/CVPR.2016.90.","DOI":"10.1109\/CVPR.2016.90"},{"key":"10.1016\/j.robot.2026.105437_b13","doi-asserted-by":"crossref","unstructured":"M. Sandler, A. Howard, M. Zhu, A. Zhmoginov, L.-C. Chen, MobileNetV2: Inverted Residuals and Linear Bottlenecks, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, CVPR, 2018, pp. 4510\u20134520, http:\/\/dx.doi.org\/10.1109\/CVPR.2018.00474.","DOI":"10.1109\/CVPR.2018.00474"},{"key":"10.1016\/j.robot.2026.105437_b14","doi-asserted-by":"crossref","unstructured":"Z. Liu, Y. Lin, Y. Cao, H. Hu, Y. Wei, Z. Zhang, S. Lin, B. Guo, Swin Transformer: Hierarchical Vision Transformer using Shifted Windows, in: Proceedings of the IEEE\/CVF International Conference on Computer Vision, ICCV, 2021, pp. 10012\u201310022, http:\/\/dx.doi.org\/10.1109\/ICCV48922.2021.00986.","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"10.1016\/j.robot.2026.105437_b15","unstructured":"A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly, J. Uszkoreit, N. Houlsby, An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale, in: International Conference on Learning Representations, ICLR, 2021, http:\/\/dx.doi.org\/10.48550\/arXiv.2010.11929."},{"key":"10.1016\/j.robot.2026.105437_b16","doi-asserted-by":"crossref","unstructured":"A. Howard, M. Sandler, G. Chu, L.-C. Chen, B. Chen, M. Tan, W. Wang, Y. Zhu, R. Pang, V. Vasudevan, Q.V. Le, H. Adam, Searching for MobileNetV3, in: Proceedings of the IEEE International Conference on Computer Vision, ICCV, 2019, pp. 1314\u20131324, http:\/\/dx.doi.org\/10.1109\/ICCV.2019.00140.","DOI":"10.1109\/ICCV.2019.00140"},{"key":"10.1016\/j.robot.2026.105437_b17","series-title":"Rethinking atrous convolution for semantic image segmentation","author":"Chen","year":"2017"},{"key":"10.1016\/j.robot.2026.105437_b18","series-title":"Ultralytics YOLO11","author":"Jocher","year":"2024"},{"issue":"3","key":"10.1016\/j.robot.2026.105437_b19","doi-asserted-by":"crossref","first-page":"5405","DOI":"10.1109\/LRA.2021.3067308","article-title":"Three-filters-to-normal: An accurate and ultrafast surface normal estimator","volume":"6","author":"Fan","year":"2021","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.robot.2026.105437_b20","doi-asserted-by":"crossref","unstructured":"R. Cruz, L. Garrote, A. Lopes, U.J. Nunes, Modular software architecture for human-robot interaction applied to the InterBot mobile robot, in: 2018 IEEE International Conference on Autonomous Robot Systems and Competitions, ICARSC, 2018, http:\/\/dx.doi.org\/10.1109\/ICARSC.2018.8374154.","DOI":"10.1109\/ICARSC.2018.8374154"},{"key":"10.1016\/j.robot.2026.105437_b21","series-title":"DL-Based Multimodal Object Detection and Tracking Targeting Industrial AMRs","author":"Borges","year":"2024"}],"container-title":["Robotics and Autonomous Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0921889026001107?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0921889026001107?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,5,2]],"date-time":"2026-05-02T10:37:26Z","timestamp":1777718246000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0921889026001107"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,7]]},"references-count":21,"alternative-id":["S0921889026001107"],"URL":"https:\/\/doi.org\/10.1016\/j.robot.2026.105437","relation":{},"ISSN":["0921-8890"],"issn-type":[{"value":"0921-8890","type":"print"}],"subject":[],"published":{"date-parts":[[2026,7]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Distilling Apple DepthPro for RGB-LiDAR depth estimation","name":"articletitle","label":"Article Title"},{"value":"Robotics and Autonomous Systems","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.robot.2026.105437","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 The Authors. Published by Elsevier B.V.","name":"copyright","label":"Copyright"}],"article-number":"105437"}}