{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,4]],"date-time":"2025-11-04T16:19:56Z","timestamp":1762273196636,"version":"3.44.0"},"reference-count":30,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:00:00Z","timestamp":1709251200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:00:00Z","timestamp":1709251200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:00:00Z","timestamp":1709251200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:00:00Z","timestamp":1709251200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:00:00Z","timestamp":1709251200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:00:00Z","timestamp":1709251200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T00:00:00Z","timestamp":1709251200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61932020","61976038","U1908210"],"award-info":[{"award-number":["61932020","61976038","U1908210"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Pattern Recognition Letters"],"published-print":{"date-parts":[[2024,3]]},"DOI":"10.1016\/j.patrec.2024.02.006","type":"journal-article","created":{"date-parts":[[2024,2,10]],"date-time":"2024-02-10T11:07:35Z","timestamp":1707563255000},"page":"172-177","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":2,"special_numbering":"C","title":["CustomDepth: Customizing point-wise depth categories for depth completion"],"prefix":"10.1016","volume":"179","author":[{"given":"Shenglun","family":"Chen","sequence":"first","affiliation":[]},{"given":"Xinchen","family":"Ye","sequence":"additional","affiliation":[]},{"given":"Hong","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Haojie","family":"Li","sequence":"additional","affiliation":[]},{"given":"Zhihui","family":"Wang","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.patrec.2024.02.006_b1","series-title":"ICRA","first-page":"4796","article-title":"Sparse-to-dense: Depth prediction from sparse depth samples and a single image","author":"Ma","year":"2018"},{"key":"10.1016\/j.patrec.2024.02.006_b2","doi-asserted-by":"crossref","DOI":"10.1016\/j.patrec.2023.02.010","article-title":"Local selective vision transformer for depth estimation using a compound eye camera","author":"Oh","year":"2023","journal-title":"Pattern Recognit. Lett."},{"key":"10.1016\/j.patrec.2024.02.006_b3","doi-asserted-by":"crossref","first-page":"63","DOI":"10.1016\/j.patrec.2014.03.026","article-title":"Edge-aware depth image filtering using color segmentation","volume":"50","author":"Schmeing","year":"2014","journal-title":"Pattern Recognit. Lett."},{"key":"10.1016\/j.patrec.2024.02.006_b4","doi-asserted-by":"crossref","first-page":"1615","DOI":"10.1109\/LSP.2021.3092280","article-title":"Dilated U-block for lightweight indoor depth completion with sobel edge","volume":"28","author":"Tao","year":"2021","journal-title":"IEEE Signal Process. Lett."},{"key":"10.1016\/j.patrec.2024.02.006_b5","series-title":"ECCV","article-title":"RigNet: Repetitive image guided network for depth completion","author":"Yan","year":"2022"},{"key":"10.1016\/j.patrec.2024.02.006_b6","series-title":"CVPR","article-title":"DeepLiDAR: Deep surface normal guided depth prediction for outdoor scene from sparse LiDAR data and single color image","author":"Qiu","year":"2019"},{"key":"10.1016\/j.patrec.2024.02.006_b7","doi-asserted-by":"crossref","first-page":"95","DOI":"10.1016\/j.patrec.2015.07.025","article-title":"Trilateral constrained sparse representation for Kinect depth hole filling","volume":"65","author":"Wang","year":"2015","journal-title":"Pattern Recognit. Lett."},{"key":"10.1016\/j.patrec.2024.02.006_b8","doi-asserted-by":"crossref","first-page":"5264","DOI":"10.1109\/TIP.2021.3079821","article-title":"Adaptive context-aware multi-modal network for depth completion","volume":"30","author":"Zhao","year":"2021","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.patrec.2024.02.006_b9","series-title":"ICRA","first-page":"8680","article-title":"Depth completion using geometry-aware embedding","author":"du","year":"2022"},{"key":"10.1016\/j.patrec.2024.02.006_b10","series-title":"CVPR","first-page":"12438","article-title":"Depth coefficients for depth completion","author":"Imran","year":"2019"},{"key":"10.1016\/j.patrec.2024.02.006_b11","series-title":"CVPR","first-page":"13916","article-title":"Depth completion using plane-residual representation","author":"Lee","year":"2021"},{"key":"10.1016\/j.patrec.2024.02.006_b12","article-title":"Learning guided convolutional network for depth completion","author":"Tang","year":"2020","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.patrec.2024.02.006_b13","doi-asserted-by":"crossref","unstructured":"L. Liu, X. Song, X. Lyu, J. Diao, M. Wang, Y. Liu, L. Zhang, FCFR-Net: Feature Fusion based Coarse- to-Fine Residual Learning for Depth Completion, in: AAAI, Vol. 35, 2021, pp. 2136\u20132144, no. 3.","DOI":"10.1609\/aaai.v35i3.16311"},{"key":"10.1016\/j.patrec.2024.02.006_b14","series-title":"ICCV","first-page":"2811","article-title":"Depth completion from sparse lidar data with depth-normal constraints","author":"Xu","year":"2019"},{"key":"10.1016\/j.patrec.2024.02.006_b15","article-title":"Learning affinity via spatial propagation networks","volume":"30","author":"Liu","year":"2017","journal-title":"NeurIPS"},{"key":"10.1016\/j.patrec.2024.02.006_b16","series-title":"ECCV","first-page":"103","article-title":"Depth estimation via affinity learned with convolutional spatial propagation network","author":"Cheng","year":"2018"},{"key":"10.1016\/j.patrec.2024.02.006_b17","series-title":"ECCV","first-page":"120","article-title":"Non-local spatial propagation network for depth completion","author":"Park","year":"2020"},{"key":"10.1016\/j.patrec.2024.02.006_b18","series-title":"ACM MM","first-page":"3926","article-title":"Pixelwise adaptive discretization with uncertainty sampling for depth completion","author":"Peng","year":"2022"},{"key":"10.1016\/j.patrec.2024.02.006_b19","series-title":"CVPR","first-page":"13906","article-title":"Cfnet: Cascade and fused cost volume for robust stereo matching","author":"Shen","year":"2021"},{"key":"10.1016\/j.patrec.2024.02.006_b20","series-title":"ICCV","first-page":"2304","article-title":"Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization","author":"Saito","year":"2019"},{"key":"10.1016\/j.patrec.2024.02.006_b21","series-title":"CVPR","first-page":"9799","article-title":"Pointrend: Image segmentation as rendering","author":"Kirillov","year":"2020"},{"key":"10.1016\/j.patrec.2024.02.006_b22","series-title":"CVPR","first-page":"3354","article-title":"Are we ready for autonomous driving? the kitti vision benchmark suite","author":"Geiger","year":"2012"},{"issue":"2","key":"10.1016\/j.patrec.2024.02.006_b23","doi-asserted-by":"crossref","first-page":"1899","DOI":"10.1109\/LRA.2020.2969938","article-title":"Unsupervised depth completion from visual inertial odometry","volume":"5","author":"Wong","year":"2020","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.patrec.2024.02.006_b24","series-title":"ECCV","first-page":"35","article-title":"Monitored distillation for positive congruent depth completion","author":"Liu","year":"2022"},{"year":"2022","series-title":"Dynamic spatial propagation network for depth completion","author":"Lin","key":"10.1016\/j.patrec.2024.02.006_b25"},{"key":"10.1016\/j.patrec.2024.02.006_b26","series-title":"ECCV","first-page":"90","article-title":"GraphCSPN: Geometry-aware depth completion via dynamic GCNs","author":"Liu","year":"2022"},{"issue":"1","key":"10.1016\/j.patrec.2024.02.006_b27","doi-asserted-by":"crossref","first-page":"81","DOI":"10.1109\/LRA.2021.3117254","article-title":"ABCD: Attentive bilateral convolutional network for robust depth completion","volume":"7","author":"Jeon","year":"2021","journal-title":"IEEE Robot. Autom. Lett."},{"key":"10.1016\/j.patrec.2024.02.006_b28","series-title":"ICRA","first-page":"13656","article-title":"PENet: Towards precise and efficient image guided depth completion","author":"Hu","year":"2021"},{"key":"10.1016\/j.patrec.2024.02.006_b29","series-title":"ICCV","first-page":"12747","article-title":"Unsupervised depth completion with calibrated backprojection layers","author":"Wong","year":"2021"},{"key":"10.1016\/j.patrec.2024.02.006_b30","series-title":"CVPR","first-page":"7482","article-title":"Multi-task learning using uncertainty to weigh losses for scene geometry and semantics","author":"Kendall","year":"2018"}],"container-title":["Pattern Recognition Letters"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0167865524000400?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0167865524000400?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2025,9,29]],"date-time":"2025-09-29T06:24:33Z","timestamp":1759127073000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0167865524000400"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,3]]},"references-count":30,"alternative-id":["S0167865524000400"],"URL":"https:\/\/doi.org\/10.1016\/j.patrec.2024.02.006","relation":{},"ISSN":["0167-8655"],"issn-type":[{"type":"print","value":"0167-8655"}],"subject":[],"published":{"date-parts":[[2024,3]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"CustomDepth: Customizing point-wise depth categories for depth completion","name":"articletitle","label":"Article Title"},{"value":"Pattern Recognition Letters","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.patrec.2024.02.006","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2024 Elsevier B.V. All rights reserved.","name":"copyright","label":"Copyright"}]}}