{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T18:14:32Z","timestamp":1775067272991,"version":"3.50.1"},"publisher-location":"Cham","reference-count":42,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031730320","type":"print"},{"value":"9783031730337","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T00:00:00Z","timestamp":1730332800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T00:00:00Z","timestamp":1730332800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73033-7_10","type":"book-chapter","created":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T00:03:55Z","timestamp":1730333035000},"page":"169-185","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":34,"title":["OpenIns3D: Snap and\u00a0Lookup for\u00a03D Open-Vocabulary Instance 
Segmentation"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0039-4970","authenticated-orcid":false,"given":"Zhening","family":"Huang","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0002-2277-7104","authenticated-orcid":false,"given":"Xiaoyang","family":"Wu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-5008-4720","authenticated-orcid":false,"given":"Xi","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8277-2706","authenticated-orcid":false,"given":"Hengshuang","family":"Zhao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3871-663X","authenticated-orcid":false,"given":"Lei","family":"Zhu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0571-0218","authenticated-orcid":false,"given":"Joan","family":"Lasenby","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,31]]},"reference":[{"key":"10_CR1","doi-asserted-by":"crossref","unstructured":"Armeni, I., et al.: 3D semantic parsing of large-scale indoor spaces. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.170"},{"key":"10_CR2","unstructured":"Bakr, E.M., Alsaedy, Y.Y., Elhoseiny, M.: Look around and refer: 2D synthetic semantics knowledge distillation for 3D visual grounding. In: NeurIPS (2022)"},{"key":"10_CR3","unstructured":"Brown, T., et\u00a0al.: Language models are few-shot learners. In: NeurIPS (2020)"},{"key":"10_CR4","unstructured":"Chen, M., et al.: STPLS3D: a large-scale synthetic and real aerial photogrammetry 3D point cloud dataset. In: BMVA (2022)"},{"key":"10_CR5","doi-asserted-by":"crossref","unstructured":"Chen, R., et al.: CLIP2Scene: towards label-efficient 3D scene understanding by clip. 
In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00678"},{"key":"10_CR6","doi-asserted-by":"crossref","unstructured":"Chen, X., Li, S., Lim, S.N., Torralba, A., Zhao, H.: Open-vocabulary panoptic segmentation with embedding modulation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00111"},{"key":"10_CR7","doi-asserted-by":"crossref","unstructured":"Dai, A., Chang, A.X., Savva, M., Halber, M., Funkhouser, T., Nie\u00dfner, M.: ScanNet: Richly-annotated 3D reconstructions of indoor scenes. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.261"},{"key":"10_CR8","doi-asserted-by":"crossref","unstructured":"Ding, R., Yang, J., Xue, C., Zhang, W., Bai, S., Qi, X.: PLA: language-driven open-vocabulary 3D scene understanding. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00677"},{"key":"10_CR9","doi-asserted-by":"crossref","unstructured":"Ding, R., Yang, J., Xue, C., Zhang, W., Bai, S., Qi, X.: Lowis3D: language-driven open-world instance-level 3D scene understanding. In: TPAMI (2024)","DOI":"10.1109\/TPAMI.2024.3410324"},{"key":"10_CR10","unstructured":"Ding, Z., Wang, J., Tu, Z.: Open-vocabulary universal image segmentation with maskCLIP. In: ICML (2023)"},{"key":"10_CR11","unstructured":"Griffiths, D., Boehm, J.: SynthCity: a large-scale synthetic point cloud. In: arXiv (2019)"},{"key":"10_CR12","doi-asserted-by":"crossref","unstructured":"Hackel, T., Savinov, N., Ladicky, L., Wegner, J.D., Schindler, K., Pollefeys, M.: Semantic3D.net: a new large-scale point cloud classification benchmark. In: ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences (2017)","DOI":"10.5194\/isprs-annals-IV-1-W1-91-2017"},{"key":"10_CR13","doi-asserted-by":"crossref","unstructured":"Huang, T., et al.: CLIP2point: transfer clip to point cloud classification with image-depth pre-training. 
In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.02025"},{"key":"10_CR14","doi-asserted-by":"crossref","unstructured":"Huang, Z., Huang, L., Gong, Y., Huang, C., Wang, X.: Mask scoring R-CNN. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00657"},{"key":"10_CR15","doi-asserted-by":"crossref","unstructured":"Kirillov, A., et al.: Segment anything. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"10_CR16","doi-asserted-by":"crossref","unstructured":"Kundu, A., et al.: Virtual multi-view fusion for 3D semantic segmentation. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58586-0_31"},{"key":"10_CR17","doi-asserted-by":"crossref","unstructured":"Lai, X., et al.: LISA: reasoning segmentation via large language model. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00915"},{"key":"10_CR18","doi-asserted-by":"crossref","unstructured":"Liu, S., et\u00a0al.: Grounding DINO: marrying DINO with grounded pre-training for open-set object detection. In: arXiv (2023)","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"10_CR19","doi-asserted-by":"crossref","unstructured":"Lu, Y., et al.: Open-vocabulary point-cloud object detection without 3D annotation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00121"},{"key":"10_CR20","doi-asserted-by":"crossref","unstructured":"Mo, K., et al.: PartNet: a large-scale benchmark for fine-grained and hierarchical part-level 3D object understanding. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00100"},{"key":"10_CR21","doi-asserted-by":"crossref","unstructured":"Nguyen, P.D.A., et al.: Open3DIS: open-vocabulary 3D instance segmentation with 2D mask guidance. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00385"},{"key":"10_CR22","doi-asserted-by":"crossref","unstructured":"Peng, S., Genova, K., Jiang, C.M., Tagliasacchi, A., Pollefeys, M., Funkhouser, T.: OpenScene: 3D scene understanding with open vocabularies. 
In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00085"},{"key":"10_CR23","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: ICML (2021)"},{"key":"10_CR24","doi-asserted-by":"crossref","unstructured":"Roynard, X., Deschaud, J.E., Goulette, F.: Paris-Lille-3D: a large and high-quality ground-truth urban point cloud dataset for automatic segmentation and classification. In: The International Journal of Robotics Research (2018)","DOI":"10.1109\/CVPRW.2018.00272"},{"key":"10_CR25","doi-asserted-by":"crossref","unstructured":"Rozenberszki, D., Litany, O., Dai, A.: Language-grounded indoor 3D semantic segmentation in the wild. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19827-4_8"},{"key":"10_CR26","doi-asserted-by":"crossref","unstructured":"Schult, J., Engelmann, F., Hermans, A., Litany, O., Tang, S., Leibe, B.: Mask3D: mask transformer for 3D semantic instance segmentation. In: ICRA (2023)","DOI":"10.1109\/ICRA48891.2023.10160590"},{"key":"10_CR27","unstructured":"Straub, J., et al.: The Replica Dataset: a digital replica of indoor spaces. In: arXiv (2019)"},{"key":"10_CR28","doi-asserted-by":"crossref","unstructured":"Su, H., Maji, S., Kalogerakis, E., Learned-Miller, E.G.: Multi-view convolutional neural networks for 3D shape recognition. In: ICCV (2015)","DOI":"10.1109\/ICCV.2015.114"},{"key":"10_CR29","unstructured":"Takmaz, A., Fedele, E., Sumner, R.W., Pollefeys, M., Tombari, F., Engelmann, F.: OpenMask3d: open-vocabulary 3D instance segmentation. In: NeurIPS (2023)"},{"key":"10_CR30","doi-asserted-by":"crossref","unstructured":"Tan, W., et al.: Toronto-3D: a large-scale mobile lidar dataset for semantic segmentation of urban roadways. In: CVPRW (2020)","DOI":"10.1109\/CVPRW50498.2020.00109"},{"key":"10_CR31","doi-asserted-by":"crossref","unstructured":"Xu, C., et al.: Image2Point: 3D point-cloud understanding with 2D image pretrained models. 
In: ECCV (2022)","DOI":"10.1007\/978-3-031-19836-6_36"},{"key":"10_CR32","doi-asserted-by":"crossref","unstructured":"Xu, J., Liu, S., Vahdat, A., Byeon, W., Wang, X., De\u00a0Mello, S.: Open-vocabulary panoptic segmentation with text-to-image diffusion models. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00289"},{"key":"10_CR33","doi-asserted-by":"crossref","unstructured":"Yang, J., Ding, R., Wang, Z., Qi, X.: RegionPLC: regional point-language contrastive learning for open-world 3D scene understanding. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.01874"},{"key":"10_CR34","doi-asserted-by":"crossref","unstructured":"Zeng, Y., et al.: Clip$$^2$$: contrastive language-image-point pretraining from real-world point cloud data. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01463"},{"key":"10_CR35","doi-asserted-by":"crossref","unstructured":"Zhang, D., et al.: FM-OV3D: foundation model-based cross-modal knowledge blending for open-vocabulary 3D detection. In: AAAI (2024)","DOI":"10.1609\/aaai.v38i15.29612"},{"key":"10_CR36","doi-asserted-by":"crossref","unstructured":"Zhang, R., et al.: PointCLIP: point cloud understanding by CLIP. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00836"},{"key":"10_CR37","doi-asserted-by":"crossref","unstructured":"Zhou, C., Loy, C.C., Dai, B.: DenseCLIP: extract free dense labels from CLIP. In: CVPR (2022)","DOI":"10.1007\/978-3-031-19815-1_40"},{"key":"10_CR38","doi-asserted-by":"crossref","unstructured":"Zhou, C., Loy, C.C., Dai, B.: Extract free dense labels from clip. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19815-1_40"},{"key":"10_CR39","unstructured":"Zhou, J., Wang, J., Ma, B., Liu, Y.S., Huang, T., Wang, X.: Uni3D: exploring unified 3D representation at scale. In: ICLR (2024)"},{"key":"10_CR40","doi-asserted-by":"crossref","unstructured":"Zhou, X., Girdhar, R., Joulin, A., Kr\u00e4henb\u00fchl, P., Misra, I.: Detecting twenty-thousand classes using image-level supervision. 
In: ECCV (2022)","DOI":"10.1007\/978-3-031-20077-9_21"},{"key":"10_CR41","unstructured":"Zhu, C., Zhang, W., Wang, T., Liu, X., Chen, K.: Object2Scene: putting objects in context for open-vocabulary 3D detection. In: arXiv (2023)"},{"key":"10_CR42","doi-asserted-by":"crossref","unstructured":"Zhu, X., et al.: PointCLIP v2: prompting CLIP and GPT for powerful 3D open-world learning. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00249"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73033-7_10","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,30]],"date-time":"2024-11-30T15:04:01Z","timestamp":1732979041000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73033-7_10"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,31]]},"ISBN":["9783031730320","9783031730337"],"references-count":42,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73033-7_10","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,31]]},"assertion":[{"value":"31 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}