{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,4]],"date-time":"2026-04-04T14:26:41Z","timestamp":1775312801224,"version":"3.50.1"},"publisher-location":"Cham","reference-count":61,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031729515","type":"print"},{"value":"9783031729522","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72952-2_26","type":"book-chapter","created":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T05:02:02Z","timestamp":1727672522000},"page":"456-473","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["WildRefer: 3D Object Localization in\u00a0Large-Scale Dynamic Scenes with\u00a0Multi-modal Visual Data and\u00a0Natural 
Language"],"prefix":"10.1007","author":[{"given":"Zhenxiang","family":"Lin","sequence":"first","affiliation":[]},{"given":"Xidong","family":"Peng","sequence":"additional","affiliation":[]},{"given":"Peishan","family":"Cong","sequence":"additional","affiliation":[]},{"given":"Ge","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Yujin","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Yuenan","family":"Hou","sequence":"additional","affiliation":[]},{"given":"Xinge","family":"Zhu","sequence":"additional","affiliation":[]},{"given":"Sibei","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Yuexin","family":"Ma","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,1]]},"reference":[{"key":"26_CR1","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"422","DOI":"10.1007\/978-3-030-58452-8_25","volume-title":"Computer Vision \u2013 ECCV 2020","author":"P Achlioptas","year":"2020","unstructured":"Achlioptas, P., Abdelreheem, A., Xia, F., Elhoseiny, M., Guibas, L.: ReferIt3D: neural listeners for fine-grained 3D object identification in real-world scenes. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12346, pp. 422\u2013440. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58452-8_25"},{"key":"26_CR2","doi-asserted-by":"crossref","unstructured":"Bai, X., et al.: Transfusion: robust lidar-camera fusion for 3d object detection with transformers. In: CVPR, pp. 1090\u20131099 (2022)","DOI":"10.1109\/CVPR52688.2022.00116"},{"key":"26_CR3","doi-asserted-by":"crossref","unstructured":"Behley, J., et al.: Semantickitti: a dataset for semantic scene understanding of lidar sequences. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
9297\u20139307 (2019)","DOI":"10.1109\/ICCV.2019.00939"},{"key":"26_CR4","doi-asserted-by":"crossref","unstructured":"Caesar, H., et al.: nuscenes: a multimodal dataset for autonomous driving. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 11621\u201311631 (2020)","DOI":"10.1109\/CVPR42600.2020.01164"},{"key":"26_CR5","doi-asserted-by":"crossref","unstructured":"Cai, D., Zhao, L., Zhang, J., Sheng, L., Xu, D.: 3djcg: a unified framework for joint dense captioning and visual grounding on 3d point clouds. In: CVPR, pp. 16464\u201316473 (2022)","DOI":"10.1109\/CVPR52688.2022.01597"},{"key":"26_CR6","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"202","DOI":"10.1007\/978-3-030-58565-5_13","volume-title":"Computer Vision \u2013 ECCV 2020","author":"DZ Chen","year":"2020","unstructured":"Chen, D.Z., Chang, A.X., Nie\u00dfner, M.: ScanRefer: 3D object localization in RGB-D scans using natural language. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12365, pp. 202\u2013221. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58565-5_13"},{"key":"26_CR7","unstructured":"Chen, J., Luo, W., Wei, X., Ma, L., Zhang, W.: Ham: hierarchical attention model with high performance for 3d visual grounding. arXiv preprint arXiv:2210.12513 (2022)"},{"key":"26_CR8","doi-asserted-by":"crossref","unstructured":"Chen, X., Zhang, T., Wang, Y., Wang, Y., Zhao, H.: Futr3d: a unified sensor fusion framework for 3d detection. arXiv preprint arXiv:2203.10642 (2022)","DOI":"10.1109\/CVPRW59228.2023.00022"},{"key":"26_CR9","doi-asserted-by":"crossref","unstructured":"Chen, Z., Hu, R., Chen, X., Nie\u00dfner, M., Chang, A.X.: Unit3d: a unified transformer for 3d dense captioning and visual grounding. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
18109\u201318119 (2023)","DOI":"10.1109\/ICCV51070.2023.01660"},{"key":"26_CR10","unstructured":"Cho, J., Lei, J., Tan, H., Bansal, M.: Unifying vision-and-language tasks via text generation. In: ICML, pp. 1931\u20131942. PMLR (2021)"},{"key":"26_CR11","doi-asserted-by":"crossref","unstructured":"Cong, P., et al.: Weakly supervised 3d multi-person pose estimation for large-scale scenes based on monocular camera and single lidar. arXiv preprint arXiv:2211.16951 (2022)","DOI":"10.1609\/aaai.v37i1.25120"},{"key":"26_CR12","doi-asserted-by":"crossref","unstructured":"Cong, P., et al.: Stcrowd: a multimodal dataset for pedestrian perception in crowded scenes. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01899"},{"key":"26_CR13","doi-asserted-by":"crossref","unstructured":"De\u00a0Vries, H., Strub, F., Chandar, S., Pietquin, O., Larochelle, H., Courville, A.: Guesswhat?! visual object discovery through multi-modal dialogue, pp. 5503\u20135512 (2017)","DOI":"10.1109\/CVPR.2017.475"},{"key":"26_CR14","doi-asserted-by":"crossref","unstructured":"Deng, J., Yang, Z., Chen, T., Zhou, W., Li, H.: Transvg: end-to-end visual grounding with transformers. In: ICCV, pp. 1769\u20131779 (2021)","DOI":"10.1109\/ICCV48922.2021.00179"},{"key":"26_CR15","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"},{"key":"26_CR16","doi-asserted-by":"crossref","unstructured":"Feng, M., Li, Z., et al.: Free-form description guided 3d visual graph network for object grounding in point cloud. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 3722\u20133731 (2021)","DOI":"10.1109\/ICCV48922.2021.00370"},{"key":"26_CR17","unstructured":"Han, X., Cong, P., Xu, L., Wang, J., Yu, J., Ma, Y.: Licamgait: gait recognition in the wild by using lidar and camera multi-modal visual sensors. 
arXiv preprint arXiv:2211.12371 (2022)"},{"key":"26_CR18","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"26_CR19","doi-asserted-by":"crossref","unstructured":"Hu, R., Rohrbach, M., et al.: Modeling relationships in referential expressions with compositional modular networks. In: CVPR, pp. 1115\u20131124 (2017)","DOI":"10.1109\/CVPR.2017.470"},{"key":"26_CR20","unstructured":"Huang, J., Qin, Y., Qi, J., Sun, Q., Zhang, H.: Deconfounded visual grounding. arXiv preprint arXiv:2112.15324 (2021)"},{"key":"26_CR21","doi-asserted-by":"crossref","unstructured":"Huang, S., Chen, Y., Jia, J., Wang, L.: Multi-view transformer for 3d visual grounding. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15524\u201315533 (2022)","DOI":"10.1109\/CVPR52688.2022.01508"},{"key":"26_CR22","doi-asserted-by":"crossref","unstructured":"Jain, A., Gkanatsios, N., Mediratta, I., Fragkiadaki, K.: Bottom up top down detection transformers for language grounding in images and point clouds. CoRR arxiv:2112.08879 (2021)","DOI":"10.1007\/978-3-031-20059-5_24"},{"key":"26_CR23","doi-asserted-by":"crossref","unstructured":"Kamath, A., Singh, M., et al: Mdetr-modulated detection for end-to-end multi-modal understanding. In: ICCV, pp. 1780\u20131790 (2021)","DOI":"10.1109\/ICCV48922.2021.00180"},{"key":"26_CR24","doi-asserted-by":"crossref","unstructured":"Kolmet, M., Zhou, Q., O\u0161ep, A., Leal-Taix\u00e9, L.: Text2pos: text-to-point-cloud cross-modal localization. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
6687\u20136696 (2022)","DOI":"10.1109\/CVPR52688.2022.00657"},{"issue":"1","key":"26_CR25","doi-asserted-by":"publisher","first-page":"32","DOI":"10.1007\/s11263-016-0981-7","volume":"123","author":"R Krishna","year":"2017","unstructured":"Krishna, R., et al.: Visual genome: connecting language and vision using crowdsourced dense image annotations. Int. J. Comput. Vision 123(1), 32\u201373 (2017)","journal-title":"Int. J. Comput. Vision"},{"key":"26_CR26","doi-asserted-by":"crossref","unstructured":"Li, Y., et\u00a0al.: Deepfusion: lidar-camera deep fusion for multi-modal 3d object detection. In: CVPR, pp. 17182\u201317191 (2022)","DOI":"10.1109\/CVPR52688.2022.01667"},{"key":"26_CR27","unstructured":"Liang, T., et al.: Bevfusion: a simple and robust lidar-camera fusion framework. arXiv preprint arXiv:2205.13790 (2022)"},{"key":"26_CR28","doi-asserted-by":"crossref","unstructured":"Liao, Y., et al.: A real-time cross-modality correlation filtering method for referring expression comprehension. In: CVPR, pp. 10880\u201310889 (2020)","DOI":"10.1109\/CVPR42600.2020.01089"},{"key":"26_CR29","doi-asserted-by":"crossref","unstructured":"Liu, H., Lin, A., Han, X., Yang, L., Yu, Y., Cui, S.: Refer-it-in-rgbd: a bottom-up approach for 3d visual grounding in rgbd images. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6032\u20136041 (2021)","DOI":"10.1109\/CVPR46437.2021.00597"},{"key":"26_CR30","doi-asserted-by":"crossref","unstructured":"Liu, R., Liu, C., Bai, Y., Yuille, A.L.: Clevr-ref+: diagnosing visual reasoning with referring expressions. In: CVPR, pp. 4185\u20134194 (2019)","DOI":"10.1109\/CVPR.2019.00431"},{"key":"26_CR31","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101 (2017)"},{"key":"26_CR32","unstructured":"Lu, J., Batra, D., Parikh, D., Lee, S.: Vilbert: pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. 
In: Wallach, H., Larochelle, H., Beygelzimer, A., d\u2019Alch\u00e9-Buc, F., Fox, E., Garnett, R. (eds.) Advances in Neural Information Processing Systems, vol.\u00a032. Curran Associates, Inc. (2019). https:\/\/proceedings.neurips.cc\/paper\/2019\/file\/c74d97b01eae257e44aa9d5bade97baf-Paper.pdf"},{"key":"26_CR33","doi-asserted-by":"crossref","unstructured":"Luo, J., et al.: 3d-sps: single-stage 3d visual grounding via referred point progressive selection. In: CVPR, pp. 16454\u201316463 (2022)","DOI":"10.1109\/CVPR52688.2022.01596"},{"key":"26_CR34","doi-asserted-by":"crossref","unstructured":"Mao, J., Huang, J., Toshev, A., Camburu, O., Yuille, A.L., Murphy, K.: Generation and comprehension of unambiguous object descriptions. In: CVPR, pp. 11\u201320 (2016)","DOI":"10.1109\/CVPR.2016.9"},{"key":"26_CR35","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"792","DOI":"10.1007\/978-3-319-46493-0_48","volume-title":"Computer Vision \u2013 ECCV 2016","author":"VK Nagaraja","year":"2016","unstructured":"Nagaraja, V.K., Morariu, V.I., Davis, L.S.: Modeling context between objects for referring expression understanding. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 792\u2013807. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_48"},{"key":"26_CR36","doi-asserted-by":"crossref","unstructured":"Ott, M., et al.: fairseq: a fast, extensible toolkit for sequence modeling. In: Proceedings of NAACL-HLT 2019: Demonstrations (2019)","DOI":"10.18653\/v1\/N19-4009"},{"key":"26_CR37","doi-asserted-by":"crossref","unstructured":"Qi, C.R., Chen, X., Litany, O., Guibas, L.J.: Imvotenet: boosting 3d object detection in point clouds with image votes. In: CVPR, pp. 
4404\u20134413 (2020)","DOI":"10.1109\/CVPR42600.2020.00446"},{"key":"26_CR38","doi-asserted-by":"crossref","unstructured":"Qi, C.R., Liu, W., Wu, C., Su, H., Guibas, L.J.: Frustum pointnets for 3d object detection from rgb-d data. In: CVPR, pp. 918\u2013927 (2018)","DOI":"10.1109\/CVPR.2018.00102"},{"key":"26_CR39","unstructured":"Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: deep hierarchical feature learning on point sets in a metric space. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"26_CR40","doi-asserted-by":"crossref","unstructured":"Rohrbach, A., Rohrbach, M., Tang, S., Joon\u00a0Oh, S., Schiele, B.: Generating descriptions with grounded and co-referenced people. In: CVPR, pp. 4979\u20134989 (2017)","DOI":"10.1109\/CVPR.2017.447"},{"key":"26_CR41","doi-asserted-by":"crossref","unstructured":"Sindagi, V.A., Zhou, Y., Tuzel, O.: Mvx-net: multimodal voxelnet for 3d object detection. In: ICRA, pp. 7276\u20137282. IEEE (2019)","DOI":"10.1109\/ICRA.2019.8794195"},{"key":"26_CR42","unstructured":"Su, W., Zhu, X., et al.: Vl-bert: pre-training of generic visual-linguistic representations. arXiv preprint arXiv:1908.08530 (2019)"},{"key":"26_CR43","unstructured":"Vaswani, A., et al.: Attention is all you need. ArXiv arxiv:1706.03762 (2017)"},{"key":"26_CR44","doi-asserted-by":"crossref","unstructured":"Vora, S., Lang, A.H., et al.: Pointpainting: sequential fusion for 3d object detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4604\u20134612 (2020)","DOI":"10.1109\/CVPR42600.2020.00466"},{"key":"26_CR45","doi-asserted-by":"crossref","unstructured":"Wang, P., Wu, Q., Cao, J., Shen, C., Gao, L., Hengel, A.V.D.: Neighbourhood watch: referring expression comprehension via language-guided graph attention networks. In: CVPR, pp. 
1960\u20131968 (2019)","DOI":"10.1109\/CVPR.2019.00206"},{"key":"26_CR46","doi-asserted-by":"crossref","unstructured":"Wu, Y., Cheng, X., Zhang, R., Cheng, Z., Zhang, J.: Eda: explicit text-decoupling and dense alignment for 3d visual grounding. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19231\u201319242 (2023)","DOI":"10.1109\/CVPR52729.2023.01843"},{"key":"26_CR47","doi-asserted-by":"crossref","unstructured":"Xu, Y., et al.: Human-centric scene understanding for 3d large-scale scenarios. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 20349\u201320359 (2023)","DOI":"10.1109\/ICCV51070.2023.01861"},{"key":"26_CR48","doi-asserted-by":"publisher","unstructured":"Yan, X., et al.: 2dpass: 2d priors assisted semantic segmentation on lidar point clouds. In: ECCV 2022, pp. 677\u2013695. Springer, Heidelberg (2022). https:\/\/doi.org\/10.1007\/978-3-031-19815-1_39","DOI":"10.1007\/978-3-031-19815-1_39"},{"key":"26_CR49","doi-asserted-by":"crossref","unstructured":"Yang, S., Li, G., Yu, Y.: Dynamic graph attention for referring expression comprehension. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4644\u20134653 (2019)","DOI":"10.1109\/ICCV.2019.00474"},{"key":"26_CR50","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"387","DOI":"10.1007\/978-3-030-58568-6_23","volume-title":"Computer Vision \u2013 ECCV 2020","author":"Z Yang","year":"2020","unstructured":"Yang, Z., Chen, T., Wang, L., Luo, J.: Improving one-stage visual grounding by recursive sub-query construction. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12359, pp. 387\u2013404. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58568-6_23"},{"key":"26_CR51","doi-asserted-by":"crossref","unstructured":"Yang, Z., et al.: A fast and accurate one-stage approach to visual grounding. 
In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00478"},{"key":"26_CR52","doi-asserted-by":"crossref","unstructured":"Yu, L., et al.: Mattnet: modular attention network for referring expression comprehension. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00142"},{"key":"26_CR53","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"69","DOI":"10.1007\/978-3-319-46475-6_5","volume-title":"Computer Vision \u2013 ECCV 2016","author":"L Yu","year":"2016","unstructured":"Yu, L., Poirson, P., Yang, S., Berg, A.C., Berg, T.L.: Modeling context in referring expressions. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9906, pp. 69\u201385. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46475-6_5"},{"key":"26_CR54","doi-asserted-by":"crossref","unstructured":"Yuan, Z., Yan, X., et al.: Instancerefer: cooperative holistic understanding for visual grounding on point clouds through instance multi-level contextual referring. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 1791\u20131800 (2021)","DOI":"10.1109\/ICCV48922.2021.00181"},{"key":"26_CR55","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Gong, Z., Chang, A.X.: Multi3drefer: grounding text description to multiple 3d objects. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 15225\u201315236 (2023)","DOI":"10.1109\/ICCV51070.2023.01397"},{"key":"26_CR56","doi-asserted-by":"crossref","unstructured":"Zhao, L., Cai, D., Sheng, L., Xu, D.: 3dvg-transformer: relation modeling for visual grounding on point clouds. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 2928\u20132937 (2021)","DOI":"10.1109\/ICCV48922.2021.00292"},{"key":"26_CR57","doi-asserted-by":"crossref","unstructured":"Zhou, L., Kalantidis, Y., et al.: Grounded video description. In: CVPR, pp. 
6578\u20136587 (2019)","DOI":"10.1109\/CVPR.2019.00674"},{"key":"26_CR58","doi-asserted-by":"crossref","unstructured":"Zhu, C., et al.: Seqtr: a simple yet universal network for visual grounding. arXiv preprint arXiv:2203.16265 (2022)","DOI":"10.1007\/978-3-031-19833-5_35"},{"issue":"10","key":"26_CR59","doi-asserted-by":"publisher","first-page":"6807","DOI":"10.1109\/TPAMI.2021.3098789","volume":"44","author":"X Zhu","year":"2021","unstructured":"Zhu, X., et al.: Cylindrical and asymmetrical 3d convolution networks for lidar-based perception. IEEE Trans. Pattern Anal. Mach. Intell. 44(10), 6807\u20136822 (2021)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"26_CR60","doi-asserted-by":"crossref","unstructured":"Zhu, Z., Ma, X., Chen, Y., Deng, Z., Huang, S., Li, Q.: 3d-vista: pre-trained transformer for 3d vision and text alignment. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2911\u20132921 (2023)","DOI":"10.1109\/ICCV51070.2023.00272"},{"key":"26_CR61","doi-asserted-by":"crossref","unstructured":"Zhuang, Z., Li, R., Jia, K., Wang, Q., Li, Y., Tan, M.: Perception-aware multi-sensor fusion for 3d lidar semantic segmentation. In: ICCV, pp. 
16280\u201316290 (2021)","DOI":"10.1109\/ICCV48922.2021.01597"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72952-2_26","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T21:40:16Z","timestamp":1732830016000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72952-2_26"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,1]]},"ISBN":["9783031729515","9783031729522"],"references-count":61,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72952-2_26","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,1]]},"assertion":[{"value":"1 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start 
Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}