{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T10:04:10Z","timestamp":1775815450864,"version":"3.50.1"},"publisher-location":"Cham","reference-count":26,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031200731","type":"print"},{"value":"9783031200748","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20074-8_22","type":"book-chapter","created":{"date-parts":[[2022,11,11]],"date-time":"2022-11-11T20:23:11Z","timestamp":1668198191000},"page":"381-396","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":44,"title":["ClearPose: Large-scale Transparent Object Dataset and\u00a0Benchmark"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1988-059X","authenticated-orcid":false,"given":"Xiaotong","family":"Chen","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1707-233X","authenticated-orcid":false,"given":"Huijie","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6213-1394","authenticated-orcid":false,"given":"Zeren","family":"Yu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4093-302X","authenticated-orcid":false,"given":"Anthony","family":"Opipari","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3750-7334","authenticated-orcid":false,"given":"Odest","family":"Chadwicke Jenkins","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,12]]},"reference":[{"issue":"6","key":"22_CR1","doi-asserted-by":"publisher","first-page":"1874","DOI":"10.1109\/TRO.2021.3075644","volume":"37","author":"C Campos","year":"2021","unstructured":"Campos, C., Elvira, R., Rodr\u00edguez, J.J.G., Montiel, J.M., Tard\u00f3s, J.D.: Orb-slam3: an accurate open-source library for visual, visual-inertial, and multimap slam. IEEE Trans. Robot. 37(6), 1874\u20131890 (2021)","journal-title":"IEEE Trans. Robot."},{"key":"22_CR2","doi-asserted-by":"crossref","unstructured":"Chang, J., et al.: GhostPose:*: multi-view pose estimation of transparent objects for robot hand grasping. In: 2021 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 5749\u20135755. IEEE (2021)","DOI":"10.1109\/IROS51168.2021.9636459"},{"key":"22_CR3","doi-asserted-by":"crossref","unstructured":"Chen, G., Han, K., Wong, K.Y.K.: Tom-net: learning transparent object matting from a single image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 9233\u20139241 (2018)","DOI":"10.1109\/CVPR.2018.00962"},{"key":"22_CR4","unstructured":"Chen, L.C., Papandreou, G., Schroff, F., Adam, H.: Rethinking atrous convolution for semantic image segmentation. arXiv preprint arXiv:1706.05587 (2017)"},{"key":"22_CR5","doi-asserted-by":"crossref","unstructured":"Chen, X., Zhang, H., Yu, Z., Lewis, S., Jenkins, O.C.: ProgressLabeller: visual data stream annotation for training object-centric 3d perception. arXiv preprint arXiv:2203.00283 (2022)","DOI":"10.1109\/IROS47612.2022.9982076"},{"key":"22_CR6","unstructured":"Eigen, D., Puhrsch, C., Fergus, R.: Depth map prediction from a single image using a multi-scale deep network. In: Advances in neural Information Processing Systems, vol. 27 (2014)"},{"key":"22_CR7","doi-asserted-by":"crossref","unstructured":"Fang, H., Fang, H.S., Xu, S., Lu, C.: TransCG: a large-scale real-world dataset for transparent object depth completion and grasping. arXiv preprint arXiv:2202.08471 (2022)","DOI":"10.1109\/LRA.2022.3183256"},{"key":"22_CR8","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask R-CNN. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2961\u20132969 (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"22_CR9","doi-asserted-by":"crossref","unstructured":"He, Y., Huang, H., Fan, H., Chen, Q., Sun, J.: Ffb6d: a full flow bidirectional fusion network for 6d pose estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3003\u20133013 (2021)","DOI":"10.1109\/CVPR46437.2021.00302"},{"key":"22_CR10","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"577","DOI":"10.1007\/978-3-030-66096-3_39","volume-title":"Computer Vision","author":"T Hoda\u0148","year":"2020","unstructured":"Hoda\u0148, T., et al.: BOP challenge 2020 on 6d object localization. In: Bartoli, A., Fusiello, A. (eds.) ECCV 2020. LNCS, vol. 12536, pp. 577\u2013594. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-66096-3_39"},{"key":"22_CR11","doi-asserted-by":"crossref","unstructured":"Hong, X., Xiong, P., Ji, R., Fan, H.: Deep fusion network for image completion. In: Proceedings of the 27th ACM International Conference on Multimedia, pp. 2033\u20132042 (2019)","DOI":"10.1145\/3343031.3351002"},{"key":"22_CR12","doi-asserted-by":"crossref","unstructured":"Liu, X., Iwase, S., Kitani, K.M.: Stereobj-1 m: large-scale stereo image dataset for 6d object pose estimation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10870\u201310879 (2021)","DOI":"10.1109\/ICCV48922.2021.01069"},{"key":"22_CR13","doi-asserted-by":"crossref","unstructured":"Liu, X., Jonschkowski, R., Angelova, A., Konolige, K.: KeyPose: multi-view 3d labeling and keypoint estimation for transparent objects. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11602\u201311610 (2020)","DOI":"10.1109\/CVPR42600.2020.01162"},{"key":"22_CR14","doi-asserted-by":"crossref","unstructured":"Peng, S., Liu, Y., Huang, Q., Zhou, X., Bao, H.: PVNet: pixel-wise voting network for 6dof pose estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4561\u20134570 (2019)","DOI":"10.1109\/CVPR.2019.00469"},{"key":"22_CR15","doi-asserted-by":"crossref","unstructured":"Sajjan, S., et al.: Clear grasp: 3d shape estimation of transparent objects for manipulation. In: 2020 IEEE International Conference on Robotics and Automation (ICRA), pp. 3634\u20133642. IEEE (2020)","DOI":"10.1109\/ICRA40945.2020.9197518"},{"key":"22_CR16","doi-asserted-by":"crossref","unstructured":"Tang, Y., Chen, J., Yang, Z., Lin, Z., Li, Q., Liu, W.: DepthGrasp: depth completion of transparent objects using self-attentive adversarial network with spectral residual for grasping. In: 2021 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 5710\u20135716. IEEE (2021)","DOI":"10.1109\/IROS51168.2021.9636382"},{"key":"22_CR17","doi-asserted-by":"crossref","unstructured":"Tian, M., Pan, L., Ang, M.H., Lee, G.H.: Robust 6d object pose estimation by learning rgb-d features. In: 2020 IEEE International Conference on Robotics and Automation (ICRA), pp. 6218\u20136224. IEEE (2020)","DOI":"10.1109\/ICRA40945.2020.9197555"},{"key":"22_CR18","doi-asserted-by":"crossref","unstructured":"Wang, C., et al.: DenseFusion: 6d object pose estimation by iterative dense fusion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3343\u20133352 (2019)","DOI":"10.1109\/CVPR.2019.00346"},{"key":"22_CR19","doi-asserted-by":"crossref","unstructured":"Xiang, Y., Schmidt, T., Narayanan, V., Fox, D.: PoseCNN: a convolutional neural network for 6d object pose estimation in cluttered scenes. Robot. Sci. Syst. (2018)","DOI":"10.15607\/RSS.2018.XIV.019"},{"key":"22_CR20","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"696","DOI":"10.1007\/978-3-030-58601-0_41","volume-title":"Computer Vision","author":"E Xie","year":"2020","unstructured":"Xie, E., Wang, W., Wang, W., Ding, M., Shen, C., Luo, P.: Segmenting transparent objects in the wild. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12358, pp. 696\u2013711. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58601-0_41"},{"issue":"23","key":"22_CR21","doi-asserted-by":"publisher","first-page":"6790","DOI":"10.3390\/s20236790","volume":"20","author":"C Xu","year":"2020","unstructured":"Xu, C., Chen, J., Yao, M., Zhou, J., Zhang, L., Liu, Y.: 6dof pose estimation of transparent object from a single RGB-D image. Sensors 20(23), 6790 (2020)","journal-title":"Sensors"},{"key":"22_CR22","unstructured":"Xu, H., Wang, Y.R., Eppel, S., Aspuru-Guzik, A., Shkurti, F., Garg, A.: Seeing glass: joint point cloud and depth completion for transparent objects. arXiv preprint arXiv:2110.00087 (2021)"},{"key":"22_CR23","doi-asserted-by":"crossref","unstructured":"Xu, Y., Nagahara, H., Shimada, A., Taniguchi, R.i.: Transcut: transparent object segmentation from a light-field image. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 3442\u20133450 (2015)","DOI":"10.1109\/ICCV.2015.393"},{"key":"22_CR24","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Funkhouser, T.: Deep depth completion of a single RGB-d image. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 175\u2013185 (2018)","DOI":"10.1109\/CVPR.2018.00026"},{"issue":"3","key":"22_CR25","first-page":"4548","volume":"5","author":"Z Zhou","year":"2020","unstructured":"Zhou, Z., Chen, X., Jenkins, O.C.: Lit: light-field inference of transparency for refractive object localization. IEEE Robot. Autom. Lett. 5(3), 4548\u20134555 (2020)","journal-title":"IEEE Robot. Autom. Lett."},{"key":"22_CR26","doi-asserted-by":"crossref","unstructured":"Zhu, L., et al.: RGB-d local implicit function for depth completion of transparent objects. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4649\u20134658 (2021)","DOI":"10.1109\/CVPR46437.2021.00462"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20074-8_22","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,3,11]],"date-time":"2023-03-11T21:49:24Z","timestamp":1678571364000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20074-8_22"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031200731","9783031200748"],"references-count":26,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20074-8_22","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"12 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}