{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T00:06:13Z","timestamp":1773705973243,"version":"3.50.1"},"publisher-location":"Cham","reference-count":90,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031727740","type":"print"},{"value":"9783031727757","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T00:00:00Z","timestamp":1727654400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72775-7_24","type":"book-chapter","created":{"date-parts":[[2024,9,29]],"date-time":"2024-09-29T07:01:50Z","timestamp":1727593310000},"page":"419-437","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":41,"title":["Open-Vocabulary SAM: Segment and\u00a0Recognize Twenty-Thousand Classes Interactively"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-9770-7720","authenticated-orcid":false,"given":"Haobo","family":"Yuan","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0550-8247","authenticated-orcid":false,"given":"Xiangtai","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9776-7739","authenticated-orcid":false,"given":"Chong","family":"Zhou","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1753-0887","authenticated-orcid":false,"given":"Yining","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6820-2325","authenticated-orcid":false,"given":"Kai","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5345-1591","authenticated-orcid":false,"given":"Chen Change","family":"Loy","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,30]]},"reference":[{"key":"24_CR1","unstructured":"Bala\u017eevi\u0107, I., Steiner, D., Parthasarathy, N., Arandjelovi\u0107, R., H\u00e9naff, O.J.: Towards in-context scene understanding. In: NeurIPS (2023)"},{"key":"24_CR2","unstructured":"Bao, H., Dong, L., Piao, S., Wei, F.: BEiT: BERT pre-training of image transformers. In: ICLR (2022)"},{"key":"24_CR3","unstructured":"Bar, A., Gandelsman, Y., Darrell, T., Globerson, A., Efros, A.: Visual prompting via image inpainting. In: NeurIPS (2022)"},{"key":"24_CR4","unstructured":"Brown, T., et\u00a0al.: Language models are few-shot learners. In: NeurIPS (2020)"},{"key":"24_CR5","doi-asserted-by":"crossref","unstructured":"Chen, C., et al.: MA-SAM: modality-agnostic SAM adaptation for 3D medical image segmentation. arXiv preprint arXiv:2309.08842 (2023)","DOI":"10.1016\/j.media.2024.103310"},{"key":"24_CR6","unstructured":"Chen, J., Yang, Z., Zhang, L.: Semantic segment anything (2023)"},{"key":"24_CR7","unstructured":"Chen, K., et\u00a0al.: MMDetection: openMMLab detection toolbox and benchmark. 
arXiv preprint arXiv:1906.07155 (2019)"},{"key":"24_CR8","unstructured":"Chen, Z., et al.: Vision transformer adapter for dense predictions. In: ICLR (2023)"},{"key":"24_CR9","doi-asserted-by":"crossref","unstructured":"Cheng, B., Misra, I., Schwing, A.G., Kirillov, A., Girdhar, R.: Masked-attention mask transformer for universal image segmentation. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"24_CR10","unstructured":"Cheng, Y., et al.: Segment and track anything. arXiv preprint arXiv:2305.06558 (2023)"},{"key":"24_CR11","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: CVPR (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"24_CR12","doi-asserted-by":"crossref","unstructured":"Ding, J., Xue, N., Xia, G.S., Dai, D.: Decoupling zero-shot semantic segmentation. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01129"},{"key":"24_CR13","unstructured":"Ding, Z., Wang, J., Tu, Z.: Open-vocabulary universal image segmentation with MaskCLIP. In: ICML (2023)"},{"key":"24_CR14","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\,\\times \\,$$16 words: transformers for image recognition at scale. In: ICLR (2021)"},{"key":"24_CR15","doi-asserted-by":"crossref","unstructured":"Fang, Y., Yang, S., Wang, S., Ge, Y., Shan, Y., Wang, X.: Unleashing vanilla vision transformer with masked image modeling for object detection. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00574"},{"key":"24_CR16","unstructured":"Fang, Z., Li, X., Li, X., Buhmann, J.M., Loy, C.C., Liu, M.: Explore in-context learning for 3D point cloud understanding. In: NeurIPS (2023)"},{"issue":"2","key":"24_CR17","doi-asserted-by":"publisher","first-page":"581","DOI":"10.1007\/s11263-023-01891-x","volume":"132","author":"P Gao","year":"2024","unstructured":"Gao, P., et al.: CLIP-Adapter: better vision-language models with feature adapters. Int. J. Comput. Vis. 132(2), 581\u2013595 (2024). https:\/\/doi.org\/10.1007\/s11263-023-01891-x","journal-title":"Int. J. Comput. Vis."},{"key":"24_CR18","unstructured":"Gu, X., Lin, T.Y., Kuo, W., Cui, Y.: Open-vocabulary object detection via vision and language knowledge distillation. In: ICLR (2021)"},{"key":"24_CR19","doi-asserted-by":"crossref","unstructured":"Gupta, A., Dollar, P., Girshick, R.: LVIS: a dataset for large vocabulary instance segmentation. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00550"},{"key":"24_CR20","unstructured":"Han, X., et al.: Boosting segment anything model towards open-vocabulary learning. arXiv preprint arXiv:2312.03628 (2023)"},{"key":"24_CR21","doi-asserted-by":"crossref","unstructured":"He, K., Chen, X., Xie, S., Li, Y., Doll\u00e1r, P., Girshick, R.: Masked autoencoders are scalable vision learners. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"24_CR22","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: mask R-CNN. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"24_CR23","unstructured":"Hu, E.J., et al.: LoRA: low-rank adaptation of large language models. In: ICLR (2022)"},{"key":"24_CR24","doi-asserted-by":"crossref","unstructured":"Huynh, D., Kuen, J., Lin, Z., Gu, J., Elhamifar, E.: Open-vocabulary instance segmentation via robust cross-modal pseudo-labeling. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00689"},{"key":"24_CR25","unstructured":"Jayaraman, D., Grauman, K.: Zero-shot recognition with unreliable attributes. 
In: NeurIPS (2014)"},{"key":"24_CR26","unstructured":"Jia, C., et al.: Scaling up visual and vision-language representation learning with noisy text supervision. In: ICML (2021)"},{"key":"24_CR27","doi-asserted-by":"crossref","unstructured":"Kim, D., Lin, T.Y., Angelova, A., Kweon, I.S., Kuo, W.: Learning open-world object proposals without learning to classify. RA-L (2022)","DOI":"10.1109\/LRA.2022.3146922"},{"key":"24_CR28","unstructured":"Kim, W., Son, B., Kim, I.: ViLT: vision-and-language transformer without convolution or region supervision. In: ICML (2021)"},{"key":"24_CR29","doi-asserted-by":"crossref","unstructured":"Kirillov, A., He, K., Girshick, R., Rother, C., Doll\u00e1r, P.: Panoptic segmentation. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00963"},{"key":"24_CR30","doi-asserted-by":"crossref","unstructured":"Kirillov, A., et\u00a0al.: Segment anything. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"24_CR31","unstructured":"Kuo, W., Cui, Y., Gu, X., Piergiovanni, A.J., Angelova, A.: F-VLM: open-vocabulary object detection upon frozen vision and language models. In: ICLR (2023)"},{"key":"24_CR32","unstructured":"Li, F., et al.: Semantic-SAM: segment and recognize anything at any granularity. arXiv preprint arXiv:2307.04767 (2023)"},{"key":"24_CR33","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In: ICML (2023)"},{"key":"24_CR34","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: Blip: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: ICML (2022)"},{"key":"24_CR35","unstructured":"Li, J., Selvaraju, R.R., Gotmare, A., Joty, S.R., Xiong, C., Hoi, S.C.: Align before fuse: vision and language representation learning with momentum distillation. In: NeurIPS (2021)"},{"key":"24_CR36","unstructured":"Li, S., Cao, J., Ye, P., Ding, Y., Tu, C., Chen, T.: ClipSAM: CLIP and SAM collaboration for zero-shot anomaly segmentation. arXiv preprint arXiv:2401.12665 (2024)"},{"key":"24_CR37","unstructured":"Li, X., et al.: Transformer-based visual segmentation: a survey. arXiv pre-print (2023)"},{"key":"24_CR38","doi-asserted-by":"crossref","unstructured":"Li, X., et al.: Semantic flow for fast and accurate scene parsing. In: ECCV (2020)","DOI":"10.1007\/978-3-030-58452-8_45"},{"key":"24_CR39","doi-asserted-by":"crossref","unstructured":"Li, X., et al.: OMG-Seg: is one model good enough for all segmentation? In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.02640"},{"issue":"2","key":"24_CR40","doi-asserted-by":"publisher","first-page":"466","DOI":"10.1007\/s11263-023-01875-x","volume":"132","author":"X Li","year":"2024","unstructured":"Li, X., et al.: SFNet: faster and accurate semantic segmentation via semantic flow. Int. J. Comput. Vis. 132(2), 466\u2013489 (2024). https:\/\/doi.org\/10.1007\/s11263-023-01875-x","journal-title":"Int. J. Comput. Vis."},{"key":"24_CR41","doi-asserted-by":"crossref","unstructured":"Li, Y., Fan, H., Hu, R., Feichtenhofer, C., He, K.: Scaling language-image pre-training via masking. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02240"},{"key":"24_CR42","unstructured":"Lian, D., Zhou, D., Feng, J., Wang, X.: Scaling & shifting your features: A new baseline for efficient model tuning. In: NeurIPS (2022)"},{"key":"24_CR43","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., et al.: Microsoft COCO: common objects in context. 
In: ECCV (2014)","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"24_CR44","doi-asserted-by":"crossref","unstructured":"Liu, S., et\u00a0al.: Grounding DINO: marrying DINO with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499 (2023)","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"24_CR45","unstructured":"Liu, Y., Qi, L., Tsai, Y.J., Li, X., Chan, K.C.K., Yang, M.H.: Effective adapter for face recognition in the wild. arXiv preprint (2023)"},{"key":"24_CR46","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019)"},{"key":"24_CR47","doi-asserted-by":"crossref","unstructured":"Maaz, M., Rasheed, H., Khan, S., Khan, F.S., Anwer, R.M., Yang, M.H.: Class-agnostic object detection with multi-modal transformer. In: ECCV (2022)","DOI":"10.1007\/978-3-031-20080-9_30"},{"key":"24_CR48","doi-asserted-by":"crossref","unstructured":"Milletari, F., Navab, N., Ahmadi, S.: V-Net: fully convolutional neural networks for volumetric medical image segmentation. In: 3DV (2016)","DOI":"10.1109\/3DV.2016.79"},{"key":"24_CR49","doi-asserted-by":"crossref","unstructured":"Pan, T., Tang, L., Wang, X., Shan, S.: Tokenize anything via prompting. arXiv preprint arXiv:2312.09128 (2023)","DOI":"10.1007\/978-3-031-72970-6_19"},{"key":"24_CR50","unstructured":"Paszke, A., et\u00a0al.: PyTorch: an imperative style, high-performance deep learning library. In: NeurIPS (2019)"},{"key":"24_CR51","doi-asserted-by":"crossref","unstructured":"Qi, L., et al.: High-quality entity segmentation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00374"},{"key":"24_CR52","doi-asserted-by":"crossref","unstructured":"Qi, L., et al.: Open world entity segmentation. IEEE TPAMI (2022)","DOI":"10.1109\/TPAMI.2022.3227513"},{"key":"24_CR53","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: ICML (2021)"},{"key":"24_CR54","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"24_CR55","doi-asserted-by":"crossref","unstructured":"Rubin, O., Herzig, J., Berant, J.: Learning to retrieve prompts for in-context learning. arXiv:2112.08633 (2021)","DOI":"10.18653\/v1\/2022.naacl-main.191"},{"key":"24_CR56","doi-asserted-by":"crossref","unstructured":"Shao, S., et al.: Objects365: a large-scale, high-quality dataset for object detection. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00852"},{"key":"24_CR57","doi-asserted-by":"crossref","unstructured":"Shi, C., Yang, S.: EdaDet: open-vocabulary object detection using early dense alignment. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01441"},{"key":"24_CR58","unstructured":"Sun, Q., Fang, Y., Wu, L., Wang, X., Cao, Y.: EVA-CLIP: improved training techniques for CLIP at scale. arXiv preprint arXiv:2303.15389 (2023)"},{"key":"24_CR59","doi-asserted-by":"crossref","unstructured":"Wang, H., et al.: SAM-CLIP: merging vision foundation models towards semantic and spatial understanding. arXiv preprint arXiv:2310.15308 (2023)","DOI":"10.1109\/CVPRW63382.2024.00367"},{"key":"24_CR60","doi-asserted-by":"crossref","unstructured":"Wang, J., et al.: V3Det: vast vocabulary visual detection dataset. 
In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01817"},{"key":"24_CR61","doi-asserted-by":"crossref","unstructured":"Wang, X., Wang, W., Cao, Y., Shen, C., Huang, T.: Images speak in images: a generalist painter for in-context visual learning. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00660"},{"key":"24_CR62","doi-asserted-by":"crossref","unstructured":"Wang, X., Zhang, X., Cao, Y., Wang, W., Shen, C., Huang, T.: SegGPT: segmenting everything in context. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00110"},{"key":"24_CR63","doi-asserted-by":"crossref","unstructured":"Wu, J., et al.: Betrayed by captions: joint caption grounding and generation for open vocabulary instance segmentation. In: ICCV (2023)","DOI":"10.36227\/techrxiv.22082723.v1"},{"key":"24_CR64","doi-asserted-by":"crossref","unstructured":"Wu, J., et al.: Towards open vocabulary learning: a survey. IEEE TPAMI (2024)","DOI":"10.1109\/TPAMI.2024.3361862"},{"key":"24_CR65","unstructured":"Wu, J., et al.: Medical SAM Adapter: adapting segment anything model for medical image segmentation. arXiv preprint arXiv:2304.12620 (2023)"},{"key":"24_CR66","doi-asserted-by":"crossref","unstructured":"Wu, S., Zhang, W., Jin, S., Liu, W., Loy, C.C.: Aligning bag of regions for open-vocabulary object detection. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01464"},{"key":"24_CR67","unstructured":"Wu, S., et al.: CLIPSelf: vision transformer distills itself for open-vocabulary dense prediction. In: ICLR (2024)"},{"key":"24_CR68","doi-asserted-by":"crossref","unstructured":"Wu, X., Zhu, F., Zhao, R., Li, H.: CORA: adapting CLIP for open-vocabulary detection with region prompting and anchor pre-matching. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00679"},{"key":"24_CR69","doi-asserted-by":"crossref","unstructured":"Xie, J., Li, W., Li, X., Liu, Z., Ong, Y.S., Loy, C.C.: MosaicFusion: diffusion models as data augmenters for large vocabulary instance segmentation. arXiv preprint arXiv:2309.13042 (2023)","DOI":"10.1007\/s11263-024-02223-3"},{"key":"24_CR70","doi-asserted-by":"crossref","unstructured":"Xu, J., Liu, S., Vahdat, A., Byeon, W., Wang, X., De\u00a0Mello, S.: Open-vocabulary panoptic segmentation with text-to-image diffusion models. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00289"},{"key":"24_CR71","doi-asserted-by":"crossref","unstructured":"Xu, M., Zhang, Z., Wei, F., Hu, H., Bai, X.: Side adapter network for open-vocabulary semantic segmentation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00288"},{"key":"24_CR72","doi-asserted-by":"crossref","unstructured":"Xu, M., et al.: A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19818-2_42"},{"key":"24_CR73","unstructured":"Xu, S., et al.: DST-DET: simple dynamic self-training for open-vocabulary object detection. arXiv pre-print (2023)"},{"key":"24_CR74","doi-asserted-by":"crossref","unstructured":"Xu, X., Xiong, T., Ding, Z., Tu, Z.: MasQCLIP for open-vocabulary universal image segmentation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00088"},{"key":"24_CR75","doi-asserted-by":"crossref","unstructured":"Yang, J., et\u00a0al.: Panoptic video scene graph generation. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01791"},{"key":"24_CR76","unstructured":"Yu, Q., He, J., Deng, X., Shen, X., Chen, L.C.: Convolutions die hard: open-vocabulary segmentation with single frozen convolutional CLIP. 
In: NeurIPS (2023)"},{"key":"24_CR77","doi-asserted-by":"crossref","unstructured":"Yu, X., Tang, L., Rao, Y., Huang, T., Zhou, J., Lu, J.: Point-BERT: pre-training 3D point cloud transformers with masked point modeling. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01871"},{"key":"24_CR78","doi-asserted-by":"crossref","unstructured":"Zang, Y., Li, W., Zhou, K., Huang, C., Loy, C.C.: Open-vocabulary DETR with conditional matching. In: ECCV (2022)","DOI":"10.1007\/978-3-031-20077-9_7"},{"key":"24_CR79","doi-asserted-by":"crossref","unstructured":"Zareian, A., Rosa, K.D., Hu, D.H., Chang, S.F.: Open-vocabulary object detection using captions. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01416"},{"key":"24_CR80","doi-asserted-by":"crossref","unstructured":"Zhai, X., et al.: LiT: zero-shot transfer with locked-image text tuning. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01759"},{"key":"24_CR81","unstructured":"Zhang, C., et al.: Faster segment anything: towards lightweight SAM for mobile applications. arXiv preprint arXiv:2306.14289 (2023)"},{"key":"24_CR82","unstructured":"Zhang, R., et al.: Personalize segment anything model with one shot. arXiv preprint arXiv:2305.03048 (2023)"},{"key":"24_CR83","unstructured":"Zhao, X., et al.: Fast segment anything. arXiv preprint arXiv:2306.12156 (2023)"},{"issue":"3","key":"24_CR84","doi-asserted-by":"publisher","first-page":"302","DOI":"10.1007\/s11263-018-1140-0","volume":"127","author":"B Zhou","year":"2018","unstructured":"Zhou, B., et al.: Semantic understanding of scenes through the ADE20K dataset. Int. J. Comput. Vis. 127(3), 302\u2013321 (2018). https:\/\/doi.org\/10.1007\/s11263-018-1140-0","journal-title":"Int. J. Comput. Vis."},{"key":"24_CR85","unstructured":"Zhou, C., Li, X., Loy, C.C., Dai, B.: EdgeSAM: prompt-in-the-loop distillation for on-device deployment of SAM. arXiv preprint arXiv:2312.06660 (2023)"},{"key":"24_CR86","doi-asserted-by":"crossref","unstructured":"Zhou, C., Loy, C.C., Dai, B.: Extract free dense labels from CLIP. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19815-1_40"},{"key":"24_CR87","unstructured":"Zhou, H., et al.: Rethinking evaluation metrics of open-vocabulary segmentaion. arXiv preprint arXiv:2311.03352 (2023)"},{"key":"24_CR88","doi-asserted-by":"publisher","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Learning to prompt for vision-language models. IJCV (2022). https:\/\/doi.org\/10.1007\/s11263-022-01653-1","DOI":"10.1007\/s11263-022-01653-1"},{"key":"24_CR89","doi-asserted-by":"crossref","unstructured":"Zhou, X., Girdhar, R., Joulin, A., Kr\u00e4henb\u00fchl, P., Misra, I.: Detecting twenty-thousand classes using image-level supervision. In: ECCV (2022)","DOI":"10.1007\/978-3-031-20077-9_21"},{"key":"24_CR90","unstructured":"Zou, X., et al.: Segment everything everywhere all at once. 
In: NeurIPS (2023)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72775-7_24","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T21:22:47Z","timestamp":1732828967000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72775-7_24"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,30]]},"ISBN":["9783031727740","9783031727757"],"references-count":90,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72775-7_24","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,9,30]]},"assertion":[{"value":"30 September 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}