{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T07:27:18Z","timestamp":1740122838100,"version":"3.37.3"},"reference-count":36,"publisher":"Springer Science and Business Media LLC","issue":"25","license":[{"start":{"date-parts":[[2022,6,15]],"date-time":"2022-06-15T00:00:00Z","timestamp":1655251200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,6,15]],"date-time":"2022-06-15T00:00:00Z","timestamp":1655251200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"name":"Key R&D Program of Jiangsu Province","award":["No. BE2018066"],"award-info":[{"award-number":["No. BE2018066"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["No.U1830105"],"award-info":[{"award-number":["No.U1830105"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"published-print":{"date-parts":[[2022,10]]},"DOI":"10.1007\/s11042-022-13341-w","type":"journal-article","created":{"date-parts":[[2022,6,15]],"date-time":"2022-06-15T02:02:26Z","timestamp":1655258546000},"page":"35915-35933","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["An adaptive converged depth completion network based on efficient RGB 
guidance"],"prefix":"10.1007","volume":"81","author":[{"given":"Kaixiang","family":"Liu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3224-9831","authenticated-orcid":false,"given":"Qingwu","family":"Li","sequence":"additional","affiliation":[]},{"given":"Yaqin","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,6,15]]},"reference":[{"key":"13341_CR1","doi-asserted-by":"crossref","unstructured":"Chen J, Wang X, Guo Z, Zhang X, Sun J (2020) Dynamic region-aware convolution. arXiv:2003.12243","DOI":"10.1109\/CVPR46437.2021.00797"},{"key":"13341_CR2","doi-asserted-by":"crossref","unstructured":"Chen Y, Dai X, Liu M, Chen D, Yuan L, Liu Z (2020) Dynamic convolution: Attention over convolution kernels. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 11030\u201311039","DOI":"10.1109\/CVPR42600.2020.01104"},{"key":"13341_CR3","doi-asserted-by":"crossref","unstructured":"Cheng X, Wang P, Guan C, Yang R (2020) Cspn++: Learning context and resource aware convolutional spatial propagation networks for depth completion. In: AAAI, pp 10615\u201310622","DOI":"10.1609\/aaai.v34i07.6635"},{"key":"13341_CR4","doi-asserted-by":"crossref","unstructured":"Cheng X, Wang P, Yang R (2018) Depth estimation via affinity learned with convolutional spatial propagation network. In: Proceedings of the European conference on computer vision (ECCV), pp 103\u2013119","DOI":"10.1007\/978-3-030-01270-0_7"},{"key":"13341_CR5","doi-asserted-by":"crossref","unstructured":"Eldesokey A, Felsberg M, Khan FS (2019) Confidence propagation through cnns for guided sparse depth regression. IEEE Transactions on Pattern Analysis and Machine Intelligence","DOI":"10.1109\/TPAMI.2019.2929170"},{"key":"13341_CR6","doi-asserted-by":"crossref","unstructured":"Fu J, Liu J, Tian H, Li Y, Bao Y, Fang Z, Lu H (2019) Dual attention network for scene segmentation. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 3146\u2013 3154","DOI":"10.1109\/CVPR.2019.00326"},{"issue":"11","key":"13341_CR7","doi-asserted-by":"publisher","first-page":"1231","DOI":"10.1177\/0278364913491297","volume":"32","author":"A Geiger","year":"2013","unstructured":"Geiger A, Lenz P, Stiller C, Urtasun R (2013) Vision meets robotics: the kitti dataset. The International Journal of Robotics Research 32(11):1231\u20131237","journal-title":"The International Journal of Robotics Research"},{"issue":"2","key":"13341_CR8","doi-asserted-by":"publisher","first-page":"1808","DOI":"10.1109\/LRA.2021.3060396","volume":"6","author":"J Gu","year":"2021","unstructured":"Gu J, Xiang Z, Ye Y, Wang L (2021) Denselidar: a real-time pseudo dense depth guided depth completion network. IEEE Robotics and Automation Letters 6(2):1808\u20131815","journal-title":"IEEE Robotics and Automation Letters"},{"key":"13341_CR9","doi-asserted-by":"crossref","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 7132\u20137141","DOI":"10.1109\/CVPR.2018.00745"},{"key":"13341_CR10","doi-asserted-by":"crossref","unstructured":"Hu M, Wang S, Li B, Ning S, Fan L, Gong X (2021) Penet: Towards precise and efficient image guided depth completion. arXiv:2103.00783","DOI":"10.1109\/ICRA48506.2021.9561035"},{"key":"13341_CR11","doi-asserted-by":"crossref","unstructured":"Imran S, Long Y, Liu X, Morris D (2019) Depth coefficients for depth completion. In: 2019 IEEE\/CVF Conference on computer vision and pattern recognition (CVPR), IEEE, pp 12438\u201312447","DOI":"10.1109\/CVPR.2019.01273"},{"key":"13341_CR12","unstructured":"Jaderberg M, Simonyan K, Zisserman A et al (2015) Spatial transformer networks. 
In: Advances in neural information processing systems, pp 2017\u20132025"},{"key":"13341_CR13","doi-asserted-by":"crossref","unstructured":"Jaritz M, De Charette R, Wirbel E, Perrotton X, Nashashibi F (2018) Sparse and dense data with cnns: Depth completion and semantic segmentation. In: 2018 International conference on 3d vision (3DV), IEEE, pp 52\u201360","DOI":"10.1109\/3DV.2018.00017"},{"key":"13341_CR14","unstructured":"Kingma DP, Ba J (2014) Adam: A method for stochastic optimization. arXiv:1412.6980"},{"key":"13341_CR15","doi-asserted-by":"crossref","unstructured":"Ku J, Harakeh A, Waslander SL (2018) In defense of classical image processing: Fast depth completion on the cpu. In: 2018 15Th conference on computer and robot vision (CRV), IEEE, pp 16\u201322","DOI":"10.1109\/CRV.2018.00013"},{"key":"13341_CR16","doi-asserted-by":"crossref","unstructured":"Liu L, Song X, Lyu X, Diao J, Wang M, Liu Y, Zhang L (2021) Fcfr-net: Feature fusion based coarse-to-fine residual learning for depth completion. In: Proceedings of the AAAI conference on artificial intelligence, vol 35, pp 2136\u20132144","DOI":"10.1609\/aaai.v35i3.16311"},{"issue":"6","key":"13341_CR17","doi-asserted-by":"publisher","first-page":"1983","DOI":"10.1109\/TIP.2015.2409551","volume":"24","author":"LK Liu","year":"2015","unstructured":"Liu LK, Chan SH, Nguyen TQ (2015) Depth reconstruction from sparse samples: Representation, algorithm, and sampling. IEEE Trans Image Process 24 (6):1983\u20131996","journal-title":"IEEE Trans Image Process"},{"key":"13341_CR18","doi-asserted-by":"crossref","unstructured":"Lu K, Barnes N, Anwar S, Zheng L (2020) From depth what can you see? depth completion via auxiliary image reconstruction. 
In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 11306\u201311315","DOI":"10.1109\/CVPR42600.2020.01132"},{"key":"13341_CR19","doi-asserted-by":"crossref","unstructured":"Ma F, Cavalheiro GV, Karaman S (2019) Self-supervised sparse-to-dense: Self-supervised depth completion from lidar and monocular camera. In: 2019 International conference on robotics and automation (ICRA), IEEE, pp 3288\u20133295","DOI":"10.1109\/ICRA.2019.8793637"},{"key":"13341_CR20","doi-asserted-by":"crossref","unstructured":"Mal F, Karaman S (2018) Sparse-to-dense: Depth prediction from sparse depth samples and a single image. In: 2018 IEEE International conference on robotics and automation (ICRA), IEEE, pp 1\u20138","DOI":"10.1109\/ICRA.2018.8460184"},{"key":"13341_CR21","doi-asserted-by":"crossref","unstructured":"Park J, Joo K, Hu Z, Liu CK, Kweon IS (2020) Non-local spatial propagation network for depth completion. arXiv:2007.10042","DOI":"10.1007\/978-3-030-58601-0_8"},{"key":"13341_CR22","doi-asserted-by":"crossref","unstructured":"Qiu J, Cui Z, Zhang Y, Zhang X, Liu S, Zeng B, Pollefeys M (2019) Deeplidar: Deep surface normal guided depth prediction for outdoor scene from sparse lidar data and single color image. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 3313\u20133322","DOI":"10.1109\/CVPR.2019.00343"},{"key":"13341_CR23","doi-asserted-by":"crossref","unstructured":"Schuster R, Wasenm\u00fcller O, Unger C, Stricker D (2020) Ssgp: Sparse spatial guided propagation for robust and generic interpolation. arXiv:2008.09346","DOI":"10.1109\/WACV48630.2021.00024"},{"key":"13341_CR24","doi-asserted-by":"crossref","unstructured":"Shivakumar SS, Nguyen T, Miller ID, Chen SW, Kumar V, Taylor CJ (2019) Dfusenet: Deep fusion of rgb and sparse depth information for image guided dense depth completion. 
In: 2019 IEEE Intelligent transportation systems conference (ITSC), IEEE, pp 13\u201320","DOI":"10.1109\/ITSC.2019.8917294"},{"key":"13341_CR25","unstructured":"Tang J, Tian FP, Feng W, Li J, Tan P (2019) Learning guided convolutional network for depth completion. arXiv:1908.01238"},{"key":"13341_CR26","doi-asserted-by":"crossref","unstructured":"Uhrig J, Schneider N, Schneider L, Franke U, Brox T, Geiger A (2017) Sparsity invariant cnns. In: 2017 International conference on 3d vision (3DV), IEEE, pp 11\u201320","DOI":"10.1109\/3DV.2017.00012"},{"key":"13341_CR27","doi-asserted-by":"crossref","unstructured":"Van Gansbeke W, Neven D, De Brabandere B, Van Gool L (2019) Sparse and noisy lidar completion with rgb guidance and uncertainty. In: 2019 16Th international conference on machine vision applications (MVA), IEEE, pp 1\u20136","DOI":"10.23919\/MVA.2019.8757939"},{"key":"13341_CR28","doi-asserted-by":"crossref","unstructured":"Wang F, Jiang M, Qian C, Yang S, Li C, Zhang H, Wang X, Tang X (2017) Residual attention network for image classification. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 3156\u20133164","DOI":"10.1109\/CVPR.2017.683"},{"key":"13341_CR29","doi-asserted-by":"crossref","unstructured":"Wang X, Girshick R, Gupta A, He K (2018) Non-local neural networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 7794\u20137803","DOI":"10.1109\/CVPR.2018.00813"},{"key":"13341_CR30","doi-asserted-by":"crossref","unstructured":"Woo S, Park J, Lee JY, So Kweon I (2018) Cbam: Convolutional block attention module. In: Proceedings of the European conference on computer vision (ECCV), pp 3\u201319","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"13341_CR31","unstructured":"Xiang R, Zheng F, Su H, Zhang Z (2020) 3ddepthnet:, Point cloud guided depth completion network for sparse depth and single color image. 
arXiv:2003.09175"},{"key":"13341_CR32","doi-asserted-by":"crossref","unstructured":"Xu Y, Zhu X, Shi J, Zhang G, Bao H, Li H (2019) Depth completion from sparse lidar data with depth-normal constraints. In: Proceedings of the IEEE international conference on computer vision, pp 2811\u20132820","DOI":"10.1109\/ICCV.2019.00290"},{"key":"13341_CR33","doi-asserted-by":"crossref","unstructured":"Xu Z, Yin H, Yao J (2020) Deformable spatial propagation networks for depth completion. In: 2020 IEEE International conference on image processing (ICIP), IEEE, pp 913\u2013917","DOI":"10.1109\/ICIP40778.2020.9191138"},{"key":"13341_CR34","unstructured":"Zhang Y, Nguyen T, Miller ID, Shivakumar SS, Chen S, Taylor CJ, Kumar V (2019) Dfinenet:, Ego-motion estimation and depth refinement from sparse, noisy depth input with rgb guidance. arXiv:1903.06397"},{"key":"13341_CR35","unstructured":"Zhang Y, Zhang J, Wang Q, Zhong Z (2020) Dynet: Dynamic convolution for accelerating convolutional neural networks. arXiv:2004.10694"},{"key":"13341_CR36","doi-asserted-by":"crossref","unstructured":"Zhao S, Gong M, Fu H, Tao D (2021) Adaptive context-aware multi-modal network for depth completion. 
IEEE Transactions on Image Processing","DOI":"10.1109\/TIP.2021.3079821"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-022-13341-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-022-13341-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-022-13341-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,24]],"date-time":"2022-09-24T04:20:06Z","timestamp":1663993206000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-022-13341-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,15]]},"references-count":36,"journal-issue":{"issue":"25","published-print":{"date-parts":[[2022,10]]}},"alternative-id":["13341"],"URL":"https:\/\/doi.org\/10.1007\/s11042-022-13341-w","relation":{},"ISSN":["1380-7501","1573-7721"],"issn-type":[{"type":"print","value":"1380-7501"},{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2022,6,15]]},"assertion":[{"value":"23 March 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 October 2021","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 June 2022","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"15 June 2022","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"All authors certify that they have no affiliations with or involvement in any organization or entity with any financial interest or non-financial interest in the subject matter or materials discussed in this manuscript.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of Interests"}}]}}