{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T16:19:47Z","timestamp":1778084387446,"version":"3.51.4"},"publisher-location":"Cham","reference-count":64,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031728969","type":"print"},{"value":"9783031728976","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72897-6_27","type":"book-chapter","created":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T21:37:11Z","timestamp":1733089031000},"page":"477-494","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["EventBind: Learning a\u00a0Unified Representation to\u00a0Bind Them All for\u00a0Event-Based Open-World Understanding"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-5258-1675","authenticated-orcid":false,"given":"Jiazhou","family":"Zhou","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4008-8951","authenticated-orcid":false,"given":"Xu","family":"Zheng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0004-1450-811X","authenticated-orcid":false,"given":"Yuanhuiyi","family":"Lyu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7485-4493","authenticated-orcid":false,"given":"Lin","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,2]]},"reference":[{"key":"27_CR1","doi-asserted-by":"crossref","unstructured":"Amir, A., et\u00a0al.: A low power, fully event-based gesture recognition system. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.781"},{"key":"27_CR2","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization. arXiv preprint arXiv:1607.06450 (2016)"},{"key":"27_CR3","unstructured":"Bahng, H., Jahanian, A., Sankaranarayanan, S., Isola, P.: Exploring visual prompts for adapting large-scale models. arXiv preprint arXiv:2203.17274 (2022)"},{"key":"27_CR4","unstructured":"Bai, Y., Wang, C., Xie, S., Dong, C., Yuan, C., Wang, Z.: Textir: a simple framework for text-based editable image restoration. arXiv preprint arXiv:2302.14736 (2023)"},{"key":"27_CR5","doi-asserted-by":"crossref","unstructured":"Baldwin, R.W., Almatrafi, M., Kaufman, J.R., Asari, V., Hirakawa, K.: Inceptive event time-surfaces for object classification using neuromorphic cameras. In: ICIAR (2019)","DOI":"10.1007\/978-3-030-27272-2_35"},{"key":"27_CR6","first-page":"25005","volume":"35","author":"A Bar","year":"2022","unstructured":"Bar, A., Gandelsman, Y., Darrell, T., Globerson, A., Efros, A.: Visual prompting via image inpainting. Adv. Neural. Inf. Process. Syst. 35, 25005\u201325017 (2022)","journal-title":"Adv. Neural. Inf. Process. 
Syst."},{"key":"27_CR7","doi-asserted-by":"crossref","unstructured":"Bi, Y., Chadha, A., Abbas, A., Bourtsoulatze, E., Andreopoulos, Y.: Graph-based object classification for neuromorphic vision sensing. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 491\u2013501 (2019)","DOI":"10.1109\/ICCV.2019.00058"},{"key":"27_CR8","doi-asserted-by":"crossref","unstructured":"Botzheim, J., Obo, T., Kubota, N.: Human gesture recognition for robot partners by spiking neural network and classification learning. In: SCIS (2012)","DOI":"10.1109\/SCIS-ISIS.2012.6505305"},{"key":"27_CR9","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"136","DOI":"10.1007\/978-3-030-58565-5_9","volume-title":"Computer Vision \u2013 ECCV 2020","author":"M Cannici","year":"2020","unstructured":"Cannici, M., Ciccone, M., Romanoni, A., Matteucci, M.: A differentiable recurrent surface for asynchronous event-based data. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12365, pp. 136\u2013152. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58565-5_9"},{"issue":"1","key":"27_CR10","doi-asserted-by":"publisher","first-page":"38","DOI":"10.1007\/s11633-022-1369-5","volume":"20","author":"FL Chen","year":"2023","unstructured":"Chen, F.L., et al.: Vlp: a survey on vision-language pre-training. Mach. Intell. Res. 20(1), 38\u201356 (2023)","journal-title":"Mach. Intell. Res."},{"key":"27_CR11","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"issue":"6","key":"27_CR12","doi-asserted-by":"publisher","first-page":"141","DOI":"10.1109\/MSP.2012.2211477","volume":"29","author":"L Deng","year":"2012","unstructured":"Deng, L.: The mnist database of handwritten digit images for machine learning research [best of the web]. IEEE Signal Process. Mag. 29(6), 141\u2013142 (2012)","journal-title":"IEEE Signal Process. Mag."},{"key":"27_CR13","doi-asserted-by":"crossref","unstructured":"Deng, Y., Chen, H., Liu, H., Li, Y.: A voxel graph CNN for object classification with event cameras. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00124"},{"key":"27_CR14","doi-asserted-by":"crossref","unstructured":"Du, Y., Liu, Z., Li, J., Zhao, W.X.: A survey of vision-language pre-trained models. arXiv preprint arXiv:2202.10936 (2022)","DOI":"10.24963\/ijcai.2022\/762"},{"key":"27_CR15","unstructured":"Fei-Fei, L., Fergus, R., Perona, P.: Learning generative visual models from few training examples: an incremental bayesian approach tested on 101 object categories. In: 2004 Conference on Computer Vision and Pattern Recognition Workshop, pp. 178\u2013178. IEEE (2004)"},{"issue":"1","key":"27_CR16","doi-asserted-by":"publisher","first-page":"154","DOI":"10.1109\/TPAMI.2020.3008413","volume":"44","author":"G Gallego","year":"2020","unstructured":"Gallego, G., et al.: Event-based vision: a survey. IEEE Trans. Pattern Anal. Mach. Intell. 44(1), 154\u2013180 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"27_CR17","doi-asserted-by":"crossref","unstructured":"Gehrig, D., Loquercio, A., Derpanis, K.G., Scaramuzza, D.: End-to-end learning of representations for asynchronous event-based data. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
5633\u20135643 (2019)","DOI":"10.1109\/ICCV.2019.00573"},{"key":"27_CR18","doi-asserted-by":"crossref","unstructured":"Gehrig, D., Loquercio, A., Derpanis, K.G., Scaramuzza, D.: End-to-end learning of representations for asynchronous event-based data. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00573"},{"key":"27_CR19","doi-asserted-by":"crossref","unstructured":"Gu, F., Sng, W., Hu, X., Yu, F.: Eventdrop: data augmentation for event-based learning. arXiv preprint arXiv:2106.05836 (2021)","DOI":"10.24963\/ijcai.2021\/97"},{"key":"27_CR20","doi-asserted-by":"crossref","unstructured":"Gu, F., Sng, W., Taunyazov, T., Soh, H.: Tactilesgnet: a spiking graph neural network for event-based tactile object recognition. In: IROS (2020)","DOI":"10.1109\/IROS45743.2020.9341421"},{"key":"27_CR21","unstructured":"Herzig, R., et al.: Promptonomyvit: multi-task prompt learning improves video transformers using synthetic scene data. arXiv preprint arXiv:2212.04821 (2022)"},{"key":"27_CR22","unstructured":"Huang, X., Li, S., Qu, W., He, T., Zuo, Y., Ouyang, W.: Frozen clip model is efficient point cloud backbone. arXiv preprint arXiv:2212.04098 (2022)"},{"key":"27_CR23","doi-asserted-by":"publisher","unstructured":"Jia, M., et al.: Visual prompt tuning. In: Avidan, S., Brostow, G., Cisse, M., Farinella, G.M., Hassner, T. (eds.) European Conference on Computer Vision, pp. 709\u2013727. Springer, Heidelberg (2022). DOI: https:\/\/doi.org\/10.1007\/978-3-031-19827-4_41","DOI":"10.1007\/978-3-031-19827-4_41"},{"key":"27_CR24","doi-asserted-by":"crossref","unstructured":"Kim, J., Bae, J., Park, G., Zhang, D., Kim, Y.M.: N-imagenet: towards robust, fine-grained object recognition with event cameras. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 2146\u20132156 (2021)","DOI":"10.1109\/ICCV48922.2021.00215"},{"key":"27_CR25","unstructured":"Kirillov, A., et\u00a0al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023)"},{"key":"27_CR26","doi-asserted-by":"crossref","unstructured":"Klenk, S., Bonello, D., Koestler, L., Araslanov, N., Cremers, D.: Masked event modeling: self-supervised pretraining for event cameras. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 2378\u20132388 (2024)","DOI":"10.1109\/WACV57701.2024.00237"},{"key":"27_CR27","unstructured":"Klenk, S., Bonello, D., Koestler, L., Cremers, D.: Masked event modeling: self-supervised pretraining for event cameras. arXiv preprint arXiv:2212.10368 (2022)"},{"issue":"7","key":"27_CR28","doi-asserted-by":"publisher","first-page":"1346","DOI":"10.1109\/TPAMI.2016.2574707","volume":"39","author":"X Lagorce","year":"2016","unstructured":"Lagorce, X., Orchard, G., Galluppi, F., Shi, B.E., Benosman, R.B.: Hots: a hierarchy of event-based time-surfaces for pattern recognition. TPAMI 39(7), 1346\u20131359 (2016)","journal-title":"TPAMI"},{"key":"27_CR29","doi-asserted-by":"crossref","unstructured":"Li, D., Li, J., Li, H., Niebles, J.C., Hoi, S.C.: Align and prompt: video-and-language pre-training with entity prompts. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4953\u20134963 (2022)","DOI":"10.1109\/CVPR52688.2022.00490"},{"key":"27_CR30","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: Blip-2: bootstrapping language-image pre-training with frozen image encoders and large language models. 
arXiv preprint arXiv:2301.12597 (2023)"},{"key":"27_CR31","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"121","DOI":"10.1007\/978-3-030-58577-8_8","volume-title":"Computer Vision \u2013 ECCV 2020","author":"X Li","year":"2020","unstructured":"Li, X., et al.: Oscar: object-semantics aligned pre-training for vision-language tasks. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12375, pp. 121\u2013137. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58577-8_8"},{"key":"27_CR32","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: Graph-based asynchronous event processing for rapid object recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 934\u2013943 (2021)","DOI":"10.1109\/ICCV48922.2021.00097"},{"key":"27_CR33","doi-asserted-by":"publisher","first-page":"388","DOI":"10.1007\/978-3-031-19833-5_23","volume-title":"European Conference on Computer Vision","author":"Z Lin","year":"2022","unstructured":"Lin, Z., et al.: Frozen clip models are efficient video learners. In: Avidan, S., Brostow, G., Cisse, M., Farinella, G.M., Hassner, T. (eds.) European Conference on Computer Vision, pp. 388\u2013404. Springer, Heidelberg (2022). https:\/\/doi.org\/10.1007\/978-3-031-19833-5_23"},{"key":"27_CR34","doi-asserted-by":"publisher","first-page":"55638","DOI":"10.1109\/ACCESS.2022.3177744","volume":"10","author":"C Liu","year":"2022","unstructured":"Liu, C., Qi, X., Lam, E.Y., Wong, N.: Fast classification and action recognition with event-based imaging. IEEE Access 10, 55638\u201355649 (2022)","journal-title":"IEEE Access"},{"key":"27_CR35","doi-asserted-by":"crossref","unstructured":"Liu, M., et al.: Partslip: low-shot part segmentation for 3d point clouds via pretrained image-language models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 21736\u201321746 (2023)","DOI":"10.1109\/CVPR52729.2023.02082"},{"key":"27_CR36","doi-asserted-by":"crossref","unstructured":"Mahmud, T., Marculescu, D.: Ave-clip: audioclip-based multi-window temporal transformer for audio visual event localization. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 5158\u20135167 (2023)","DOI":"10.1109\/WACV56688.2023.00513"},{"key":"27_CR37","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"415","DOI":"10.1007\/978-3-030-58598-3_25","volume-title":"Computer Vision \u2013 ECCV 2020","author":"N Messikommer","year":"2020","unstructured":"Messikommer, N., Gehrig, D., Loquercio, A., Scaramuzza, D.: Event-based asynchronous sparse convolutional networks. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12353, pp. 415\u2013431. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58598-3_25"},{"key":"27_CR38","doi-asserted-by":"publisher","unstructured":"Ni, B., et al.: Expanding language-image pretrained models for general video recognition. In: Computer Vision\u2013ECCV 2022: 17th European Conference, Tel Aviv, Israel, 23\u201327 October 2022, Proceedings, Part IV, pp. 1\u201318. Springer, Heidelberg (2022). 
https:\/\/doi.org\/10.1007\/978-3-031-19772-7_1","DOI":"10.1007\/978-3-031-19772-7_1"},{"key":"27_CR39","doi-asserted-by":"publisher","first-page":"437","DOI":"10.3389\/fnins.2015.00437","volume":"9","author":"G Orchard","year":"2015","unstructured":"Orchard, G., Jayawant, A., Cohen, G.K., Thakor, N.: Converting static image datasets to spiking neuromorphic datasets using saccades. Front. Neurosci. 9, 437 (2015)","journal-title":"Front. Neurosci."},{"key":"27_CR40","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"27_CR41","doi-asserted-by":"crossref","unstructured":"Rasheed, H., Khattak, M.U., Maaz, M., Khan, S., Khan, F.S.: Fine-tuned clip models are efficient video learners. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6545\u20136554 (2023)","DOI":"10.1109\/CVPR52729.2023.00633"},{"key":"27_CR42","doi-asserted-by":"crossref","unstructured":"Rebecq, H., Ranftl, R., Koltun, V., Scaramuzza, D.: Events-to-video: bringing modern computer vision to event cameras. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3857\u20133866 (2019)","DOI":"10.1109\/CVPR.2019.00398"},{"key":"27_CR43","doi-asserted-by":"crossref","unstructured":"Schaefer, S., Gehrig, D., Scaramuzza, D.: Aegnn: asynchronous event-based graph neural networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12371\u201312381 (2022)","DOI":"10.1109\/CVPR52688.2022.01205"},{"key":"27_CR44","unstructured":"Shen, S., et al.: Multitask vision-language prompt tuning. arXiv preprint arXiv:2211.11720 (2022)"},{"key":"27_CR45","doi-asserted-by":"crossref","unstructured":"Sironi, A., Brambilla, M., Bourdis, N., Lagorce, X., Benosman, R.: HATS: histograms of averaged time surfaces for robust event-based object classification. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00186"},{"key":"27_CR46","first-page":"200","volume":"34","author":"M Tsimpoukelli","year":"2021","unstructured":"Tsimpoukelli, M., Menick, J.L., Cabi, S., Eslami, S., Vinyals, O., Hill, F.: Multimodal few-shot learning with frozen language models. Adv. Neural. Inf. Process. Syst. 34, 200\u2013212 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"27_CR47","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural. Inf. Process. Syst. 30 (2017)"},{"key":"27_CR48","doi-asserted-by":"crossref","unstructured":"Wang, J., Zhou, P., Shou, M.Z., Yan, S.: Position-guided text prompt for vision-language pre-training. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 23242\u201323251 (2023)","DOI":"10.1109\/CVPR52729.2023.02226"},{"key":"27_CR49","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: EV-gait: event-based robust gait recognition using dynamic vision sensors. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00652"},{"key":"27_CR50","unstructured":"Wang, Z., Yu, J., Yu, A.W., Dai, Z., Tsvetkov, Y., Cao, Y.: Simvlm: simple visual language model pretraining with weak supervision. arXiv preprint arXiv:2108.10904 (2021)"},{"key":"27_CR51","doi-asserted-by":"crossref","unstructured":"Wang, Z., Hu, Y., Liu, S.C.: Exploiting spatial sparsity for event cameras with visual transformers. In: 2022 IEEE International Conference on Image Processing (ICIP), pp. 411\u2013415. 
IEEE (2022)","DOI":"10.1109\/ICIP46576.2022.9897432"},{"key":"27_CR52","doi-asserted-by":"crossref","unstructured":"Wasim, S.T., Naseer, M., Khan, S., Khan, F.S., Shah, M.: Vita-clip: video and text adaptive clip via multimodal prompting. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 23034\u201323044 (2023)","DOI":"10.1109\/CVPR52729.2023.02206"},{"key":"27_CR53","unstructured":"Wu, Z., Liu, X., Gilitschenski, I.: Eventclip: adapting clip for event-based object recognition. arXiv preprint arXiv:2306.06354 (2023)"},{"key":"27_CR54","doi-asserted-by":"crossref","unstructured":"Xue, L., et al.: Ulip: learning a unified representation of language, images, and point clouds for 3d understanding. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1179\u20131189 (2023)","DOI":"10.1109\/CVPR52729.2023.00120"},{"key":"27_CR55","doi-asserted-by":"crossref","unstructured":"Yang, Y., Pan, L., Liu, L.: Event camera data pre-training. arXiv preprint arXiv:2301.01928 (2023)","DOI":"10.1109\/ICCV51070.2023.00982"},{"key":"27_CR56","doi-asserted-by":"crossref","unstructured":"Yao, H., Zhang, R., Xu, C.: Visual-language prompt tuning with knowledge-guided context optimization. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6757\u20136767 (2023)","DOI":"10.1109\/CVPR52729.2023.00653"},{"key":"27_CR57","unstructured":"Yao, Y., Zhang, A., Zhang, Z., Liu, Z., Chua, T.S., Sun, M.: CPT: colorful prompt tuning for pre-trained vision-language models. arXiv preprint arXiv:2109.11797 (2021)"},{"key":"27_CR58","unstructured":"Zang, Y., Li, W., Zhou, K., Huang, C., Loy, C.C.: Unified vision and language prompt learning. arXiv preprint arXiv:2210.07225 (2022)"},{"key":"27_CR59","doi-asserted-by":"crossref","unstructured":"Zeng, Y., et al.: Clip2: contrastive language-image-point pretraining from real-world point cloud data. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 15244\u201315253 (2023)","DOI":"10.1109\/CVPR52729.2023.01463"},{"key":"27_CR60","doi-asserted-by":"crossref","unstructured":"Zhang, R., et al.: Pointclip: point cloud understanding by clip. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8552\u20138562 (2022)","DOI":"10.1109\/CVPR52688.2022.00836"},{"key":"27_CR61","doi-asserted-by":"crossref","unstructured":"Zhang, R., Zeng, Z., Guo, Z., Li, Y.: Can language understand depth? In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 6868\u20136874 (2022)","DOI":"10.1145\/3503161.3549201"},{"key":"27_CR62","unstructured":"Zheng, X., et al.: Deep learning for event-based vision: a comprehensive survey and benchmarks. arXiv preprint arXiv:2302.08890 (2023)"},{"key":"27_CR63","doi-asserted-by":"crossref","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Conditional prompt learning for vision-language models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 16816\u201316825 (2022)","DOI":"10.1109\/CVPR52688.2022.01631"},{"issue":"9","key":"27_CR64","doi-asserted-by":"publisher","first-page":"2337","DOI":"10.1007\/s11263-022-01653-1","volume":"130","author":"K Zhou","year":"2022","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Learning to prompt for vision-language models. Int. J. Comput. Vision 130(9), 2337\u20132348 (2022)","journal-title":"Int. J. Comput. 
Vision"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72897-6_27","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,1]],"date-time":"2024-12-01T23:21:12Z","timestamp":1733095272000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72897-6_27"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,2]]},"ISBN":["9783031728969","9783031728976"],"references-count":64,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72897-6_27","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12,2]]},"assertion":[{"value":"2 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}