{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,17]],"date-time":"2026-01-17T20:42:55Z","timestamp":1768682575482,"version":"3.49.0"},"publisher-location":"Cham","reference-count":69,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031728471","type":"print"},{"value":"9783031728488","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,29]],"date-time":"2024-11-29T00:00:00Z","timestamp":1732838400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,29]],"date-time":"2024-11-29T00:00:00Z","timestamp":1732838400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72848-8_3","type":"book-chapter","created":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T13:35:13Z","timestamp":1732800913000},"page":"36-54","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["HVCLIP: High-Dimensional Vector in\u00a0CLIP for\u00a0Unsupervised Domain Adaptation"],"prefix":"10.1007","author":[{"given":"Noranart","family":"Vesdapunt","sequence":"first","affiliation":[]},{"given":"Kah Kuen","family":"Fu","sequence":"additional","affiliation":[]},{"given":"Yue","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Xu","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Pradeep","family":"Natarajan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,29]]},"reference":[{"key":"3_CR1","unstructured":"Berthelot, D., et al.: RemixMatch: semi-supervised learning with distribution alignment and augmentation anchoring. In: ICLR (2020)"},{"key":"3_CR2","unstructured":"Chen, S., Zhang, Y., Jiang, W., Lu, J., Zhang, Y.: Large language models as visual cross-domain learners (2024)"},{"key":"3_CR3","doi-asserted-by":"publisher","unstructured":"Chung, H.W., et al.: Scaling instruction-finetuned language models (2022). https:\/\/doi.org\/10.48550\/ARXIV.2210.11416, https:\/\/arxiv.org\/abs\/2210.11416","DOI":"10.48550\/ARXIV.2210.11416"},{"key":"3_CR4","doi-asserted-by":"crossref","unstructured":"Cubuk, E.D., Zoph, B., Shlens, J., Le, Q.V.: RandAugment: practical automated data augmentation with a reduced search space. In: CVPRW, pp. 702\u2013703 (2020)","DOI":"10.1109\/CVPRW50498.2020.00359"},{"key":"3_CR5","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"3_CR6","unstructured":"Ding, Y., Liu, L., Tian, C., Yang, J., Ding, H.: Don\u2019t stop learning: towards continual learning for the CLIP model. arXiv preprint arXiv:2207.09248 (2022)"},{"key":"3_CR7","unstructured":"Donahue, J., et al.: DeCAF: a deep convolutional activation feature for generic visual recognition. In: Xing, E.P., Jebara, T. (eds.) Proceedings of the 31st International Conference on Machine Learning. Proceedings of Machine Learning Research, vol.\u00a032, pp. 647\u2013655. PMLR, Bejing (2014). 
https:\/\/proceedings.mlr.press\/v32\/donahue14.html"},{"key":"3_CR8","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. In: ICLR (2021)"},{"key":"3_CR9","unstructured":"Gao, P., et al.: CLIP-adapter: better vision-language models with feature adapters. arXiv preprint arXiv:2110.04544 (2021)"},{"key":"3_CR10","doi-asserted-by":"crossref","unstructured":"Gao, Z., Zhang, S., Huang, K., Wang, Q., Zhong, C.: Gradient distribution alignment certificates better adversarial domain adaptation. In: ICCV, pp. 8937\u20138946 (2021)","DOI":"10.1109\/ICCV48922.2021.00881"},{"key":"3_CR11","unstructured":"Hassan, E.T., Chen, X., Crandall, D.J.: Unsupervised domain adaptation using generative models and self-ensembling. CoRR abs\/1812.00479 (2018). http:\/\/arxiv.org\/abs\/1812.00479"},{"key":"3_CR12","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"issue":"2","key":"3_CR13","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1007\/s12559-009-9009-8","volume":"1","author":"P Kanerva","year":"2009","unstructured":"Kanerva, P.: Hyperdimensional computing: an introduction to computing in distributed representation with high-dimensional random vectors. Cogn. Comput. 1(2), 139\u2013159 (2009)","journal-title":"Cogn. Comput."},{"key":"3_CR14","doi-asserted-by":"crossref","unstructured":"Kang, G., Jiang, L., Yang, Y., Hauptmann, A.G.: Contrastive adaptation network for unsupervised domain adaptation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4893\u20134902 (2019)","DOI":"10.1109\/CVPR.2019.00503"},{"key":"3_CR15","unstructured":"Kent, S., Olshausen, B.: A vector symbolic approach to scene transformation. In: Cognitive Computational Neuroscience (CCN\u201917) (Extended Abstract) (2017)"},{"key":"3_CR16","doi-asserted-by":"crossref","unstructured":"Kim, J., Ryoo, K., Seo, J., Lee, G., Kim, D., Cho, H., Kim, S.: Semi-supervised learning of semantic correspondence with pseudo-labels. In: CVPR, pp. 19699\u201319709 (2022)","DOI":"10.1109\/CVPR52688.2022.01908"},{"key":"3_CR17","doi-asserted-by":"publisher","unstructured":"Kolesnikov, A., et al.: Big transfer (BiT): general visual representation learning (2019). https:\/\/doi.org\/10.48550\/ARXIV.1912.11370, https:\/\/arxiv.org\/abs\/1912.11370","DOI":"10.48550\/ARXIV.1912.11370"},{"key":"3_CR18","unstructured":"Krizhevsky, A., Nair, V., Hinton, G.: CIFAR-100 (Canadian institute for advanced research). http:\/\/www.cs.toronto.edu\/~kriz\/cifar.html"},{"key":"3_CR19","doi-asserted-by":"crossref","unstructured":"Lai, Z., et al.: Empowering unsupervised domain adaptation with large-scale pre-trained vision-language models. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), pp. 2691\u20132701 (2024)","DOI":"10.1109\/WACV57701.2024.00267"},{"key":"3_CR20","doi-asserted-by":"crossref","unstructured":"Lai, Z., et al.: PadCLIP: pseudo-labeling with adaptive debiasing in CLIP for unsupervised domain adaptation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01480"},{"key":"3_CR21","unstructured":"Lai, Z., Wang, C., Gunawan, H., Cheung, S.C.S., Chuah, C.N.: Smoothed adaptive weighting for imbalanced semi-supervised learning: improve reliability against unknown distribution data, In: ICML, pp. 
11828\u201311843 (2022)"},{"key":"3_CR22","doi-asserted-by":"crossref","unstructured":"Lee, S., Kim, D., Kim, N., Jeong, S.G.: Drop to adapt: learning discriminative features for unsupervised domain adaptation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 91\u2013100 (2019)","DOI":"10.1109\/ICCV.2019.00018"},{"issue":"11","key":"3_CR23","doi-asserted-by":"publisher","first-page":"3918","DOI":"10.1109\/TPAMI.2020.2991050","volume":"43","author":"J Li","year":"2021","unstructured":"Li, J., Chen, E., Ding, Z., Zhu, L., Lu, K., Shen, H.T.: Maximum density divergence for domain adaptation. IEEE Trans. Pattern Anal. Mach. Intell. 43(11), 3918\u20133930 (2021). https:\/\/doi.org\/10.1109\/TPAMI.2020.2991050","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"3_CR24","unstructured":"Li, S., et al.: Semantic concentration for domain adaptation. In: ICCV, pp. 9102\u20139111 (2021)"},{"key":"3_CR25","unstructured":"Li, Y.J., et al.: Cross-domain adaptive teacher for object detection. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 7581\u20137590 (2022)"},{"issue":"12","key":"3_CR26","doi-asserted-by":"publisher","first-page":"2935","DOI":"10.1109\/TPAMI.2017.2773081","volume":"40","author":"Z Li","year":"2017","unstructured":"Li, Z., Hoiem, D.: Learning without forgetting. IEEE Trans. Pattern Anal. Mach. Intell. 40(12), 2935\u20132947 (2017)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"3_CR27","unstructured":"Liang, J., Hu, D., Feng, J.: Do we really need to access the source data? Source hypothesis transfer for unsupervised domain adaptation. In: ICML, pp. 6028\u20136039 (2020)"},{"key":"3_CR28","unstructured":"Lin, T., et al.: Microsoft COCO: common objects in context. CoRR abs\/1405.0312 (2014). http:\/\/arxiv.org\/abs\/1405.0312"},{"key":"3_CR29","first-page":"22968","volume":"34","author":"H Liu","year":"2021","unstructured":"Liu, H., Wang, J., Long, M.: Cycle self-training for domain adaptation. Adv. Neural. Inf. Process. Syst. 34, 22968\u201322981 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"3_CR30","unstructured":"Long, M., Cao, Z., Wang, J., Jordan, M.I.: Conditional adversarial domain adaptation. In: NeurIPS, pp. 1645\u20131655 (2018)"},{"key":"3_CR31","unstructured":"Long, M., Zhu, H., Wang, J., Jordan, M.I.: Deep transfer learning with joint adaptation networks. In: ICML, pp. 2208\u20132217 (2017)"},{"key":"3_CR32","unstructured":"Loshchilov, I., Hutter, F.: SGDR: stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983 (2016)"},{"issue":"9","key":"3_CR33","doi-asserted-by":"publisher","first-page":"5521","DOI":"10.3390\/app13095521","volume":"13","author":"J Maur\u00edcio","year":"2023","unstructured":"Maur\u00edcio, J., Domingues, I., Bernardino, J.: Comparing vision transformers and convolutional neural networks for image classification: a literature review. Appl. Sci. 13(9), 5521 (2023)","journal-title":"Appl. Sci."},{"key":"3_CR34","unstructured":"Micikevicius, P., et\u00a0al.: Mixed precision training. In: ICLR (2018)"},{"key":"3_CR35","unstructured":"mnmoustafa, M.A.: Tiny ImageNet (2017). https:\/\/kaggle.com\/competitions\/tiny-imagenet"},{"key":"3_CR36","unstructured":"Montone, G., O\u2019Regan, J.K., Terekhov, A.V.: Hyper-dimensional computing for a visual question-answering system that is trainable end-to-end. 
arXiv preprint arXiv:1711.10185 (2017)"},{"key":"3_CR37","doi-asserted-by":"crossref","unstructured":"Na, J., Jung, H., Chang, H.J., Hwang, W.: FixBi: bridging domain spaces for unsupervised domain adaptation (2021)","DOI":"10.1109\/CVPR46437.2021.00115"},{"issue":"4","key":"3_CR38","doi-asserted-by":"publisher","first-page":"319","DOI":"10.1007\/s13218-019-00623-z","volume":"33","author":"P Neubert","year":"2019","unstructured":"Neubert, P., Schubert, S., Protzel, P.: An introduction to hyperdimensional computing for robotics. KI-K\u00fcnstliche Intell. 33(4), 319\u2013330 (2019)","journal-title":"KI-K\u00fcnstliche Intell."},{"key":"3_CR39","doi-asserted-by":"crossref","unstructured":"Peng, X., Bai, Q., Xia, X., Huang, Z., Saenko, K., Wang, B.: Moment matching for multi-source domain adaptation. In: ICCV, pp. 1406\u20131415 (2019)","DOI":"10.1109\/ICCV.2019.00149"},{"key":"3_CR40","unstructured":"Peng, X., Usman, B., Kaushik, N., Hoffman, J., Wang, D., Saenko, K.: VisDA: the visual domain adaptation challenge. arXiv preprint arXiv:1710.06924 (2017)"},{"key":"3_CR41","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: ICML, pp. 8748\u20138763 (2021)"},{"key":"3_CR42","unstructured":"Rangwani, H., Aithal, S.K., Mishra, M., Jain, A., Radhakrishnan, V.B.: A closer look at smoothness in domain adversarial training. In: ICML, pp. 18378\u201318399 (2022)"},{"key":"3_CR43","doi-asserted-by":"crossref","unstructured":"Rebuffi, S.A., Kolesnikov, A., Sperl, G., Lampert, C.H.: iCaRL: incremental classifier and representation learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2001\u20132010 (2017)","DOI":"10.1109\/CVPR.2017.587"},{"key":"3_CR44","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"213","DOI":"10.1007\/978-3-642-15561-1_16","volume-title":"Computer Vision \u2013 ECCV 2010","author":"K Saenko","year":"2010","unstructured":"Saenko, K., Kulis, B., Fritz, M., Darrell, T.: Adapting visual category models to new domains. In: Daniilidis, K., Maragos, P., Paragios, N. (eds.) ECCV 2010. LNCS, vol. 6314, pp. 213\u2013226. Springer, Heidelberg (2010). https:\/\/doi.org\/10.1007\/978-3-642-15561-1_16"},{"key":"3_CR45","unstructured":"Schuhmann, C., et al.: LAION-5B: an open large-scale dataset for training next generation image-text models. In: Koyejo, S., Mohamed, S., Agarwal, A., Belgrave, D., Cho, K., Oh, A. (eds.) Advances in Neural Information Processing Systems, vol.\u00a035, pp. 25278\u201325294. Curran Associates, Inc. (2022), https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2022\/file\/a1859debfb3b59d094f3504d5ebb6c25-Paper-Datasets_and_Benchmarks.pdf"},{"key":"3_CR46","doi-asserted-by":"crossref","unstructured":"Singha, M., Pal, H., Jha, A., Banerjee, B.: AD-CLIP: adapting domains in prompt space using CLIP. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV) Workshops, pp. 4355\u20134364 (October 2023)","DOI":"10.1109\/ICCVW60793.2023.00470"},{"key":"3_CR47","unstructured":"Sohn, K., et al.: FixMatch: simplifying semi-supervised learning with consistency and confidence. 
In: NeurIPS (2020)"},{"key":"3_CR48","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"443","DOI":"10.1007\/978-3-319-49409-8_35","volume-title":"Computer Vision \u2013 ECCV 2016 Workshops","author":"B Sun","year":"2016","unstructured":"Sun, B., Saenko, K.: Deep CORAL: correlation alignment for deep domain adaptation. In: Hua, G., J\u00e9gou, H. (eds.) ECCV 2016. LNCS, vol. 9915, pp. 443\u2013450. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-49409-8_35"},{"key":"3_CR49","doi-asserted-by":"crossref","unstructured":"Sun, T., Lu, C., Zhang, T., Ling, H.: Safe self-refinement for transformer-based domain adaptation. In: CVPR, pp. 7191\u20137200 (2022)","DOI":"10.1109\/CVPR52688.2022.00705"},{"key":"3_CR50","doi-asserted-by":"crossref","unstructured":"Tang, S., Su, W., Ye, M., Zhu, X.: Source-free domain adaptation with frozen multimodal foundation model (2023)","DOI":"10.1109\/CVPR52733.2024.02238"},{"key":"3_CR51","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"17","DOI":"10.1007\/978-3-031-19803-8_2","volume-title":"Computer Vision - ECCV 2022","author":"J Theiss","year":"2022","unstructured":"Theiss, J., Leverett, J., Kim, D., Prakash, A.: Unpaired image translation via vector symbolic architectures. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13681, pp. 17\u201332. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19803-8_2"},{"key":"3_CR52","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., Jegou, H.: Training data-efficient image transformers & distillation through attention. In: ICML, vol.\u00a0139, pp. 10347\u201310357 (2021)"},{"key":"3_CR53","unstructured":"Tzeng, E., Hoffman, J., Zhang, N., Saenko, K., Darrell, T.: Deep domain confusion: maximizing for domain invariance. arXiv preprint arXiv:1412.3474 (2014)"},{"key":"3_CR54","doi-asserted-by":"crossref","unstructured":"Venkateswara, H., Eusebio, J., Chakraborty, S., Panchanathan, S.: Deep hashing network for unsupervised domain adaptation. In: CVPR, pp. 5018\u20135027 (2017)","DOI":"10.1109\/CVPR.2017.572"},{"key":"3_CR55","doi-asserted-by":"crossref","unstructured":"Wang, Q., Meng, F., Breckon, T.P.: Data augmentation with norm-ae and selective pseudo-labelling for unsupervised domain adaptation. Neural Netw. 161, 614\u2013625 (2023)","DOI":"10.1016\/j.neunet.2023.02.006"},{"key":"3_CR56","doi-asserted-by":"crossref","unstructured":"Wang, T., Ding, Z., Shao, W., Tang, H., Huang, K.: Towards fair cross-domain adaptation via generative learning. In: WACV, pp. 454\u2013463 (2021)","DOI":"10.1109\/WACV48630.2021.00050"},{"key":"3_CR57","unstructured":"Wang, X., Jin, Y., Long, M., Wang, J., Jordan, M.: Transferable normalization: towards improving transferability of deep neural networks. In: NeurIPS (2019)"},{"key":"3_CR58","unstructured":"Westfechtel, T., Zhang, D., Harada, T.: Combining inherent knowledge of vision-language models with unsupervised domain adaptation through self-knowledge distillation (2023)"},{"key":"3_CR59","doi-asserted-by":"crossref","unstructured":"Xu, M., et al.: Adversarial domain adaptation with domain mixup. In: AAAI, pp. 6502\u20136509 (2020)","DOI":"10.1609\/aaai.v34i04.6123"},{"key":"3_CR60","unstructured":"Xu, T., Chen, W., Wang, P., Wang, F., Li, H., Jin, R.: CDTrans: cross-domain transformer for unsupervised domain adaptation. 
In: ICLR (2022)"},{"key":"3_CR61","doi-asserted-by":"crossref","unstructured":"Yang, J., Liu, J., Xu, N., Huang, J.: TVT: transferable vision transformer for unsupervised domain adaptation. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 520\u2013530 (2023)","DOI":"10.1109\/WACV56688.2023.00059"},{"key":"3_CR62","unstructured":"Yang, S., Wang, Y., Wang, K., Jui, S., et\u00a0al.: Attracting and dispersing: a simple approach for source-free domain adaptation. In: Advances in Neural Information Processing Systems (2022)"},{"key":"3_CR63","unstructured":"Zhang, B., et al.: FlexMatch: boosting semi-supervised learning with curriculum pseudo labeling. In: NeurIPS, vol. 34, pp. 18408\u201318419 (2021)"},{"key":"3_CR64","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"493","DOI":"10.1007\/978-3-031-19833-5_29","volume-title":"Computer Vision \u2013 ECCV 2022","author":"R Zhang","year":"2022","unstructured":"Zhang, R., et al.: Tip-adapter: training-free adaption of CLIP for few-shot classification. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13695, pp. 493\u2013510. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19833-5_29"},{"key":"3_CR65","unstructured":"Zhang, Y., Deng, B., Tang, H., Zhang, L., Jia, K.: Unsupervised multi-class domain adaptation: theory, algorithms, and practice. IEEE Trans. Pattern Anal. Mach. Intell. (2020)"},{"key":"3_CR66","doi-asserted-by":"crossref","unstructured":"Zheng, Z., Ma, M., Wang, K., Qin, Z., Yue, X., You, Y.: Preventing zero-shot transfer degradation in continual learning of vision-language models. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 19125\u201319136 (2023)","DOI":"10.1109\/ICCV51070.2023.01752"},{"key":"3_CR67","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Learning to prompt for vision-language models. Int. J. Comput. Vis. 1\u201312 (2022)"},{"key":"3_CR68","doi-asserted-by":"crossref","unstructured":"Zhu, J., Bai, H., Wang, L.: Patch-mix transformer for unsupervised domain adaptation: a game perspective. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3561\u20133571 (2023)","DOI":"10.1109\/CVPR52729.2023.00347"},{"key":"3_CR69","doi-asserted-by":"crossref","unstructured":"Zou, Y., Yu, Z., Liu, X., Kumar, B., Wang, J.: Confidence regularized self-training. In: ICCV, pp. 
5982\u20135991 (2019)","DOI":"10.1109\/ICCV.2019.00608"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72848-8_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,28]],"date-time":"2024-11-28T14:04:56Z","timestamp":1732802696000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72848-8_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,29]]},"ISBN":["9783031728471","9783031728488"],"references-count":69,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72848-8_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,29]]},"assertion":[{"value":"29 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
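For anyone who wants to reproduce or post-process this metadata rather than read the raw JSON, the object above is the standard Crossref work message for DOI 10.1007/978-3-031-72848-8_3. Below is a minimal sketch, assuming the public Crossref REST API endpoint https://api.crossref.org/works/{DOI} and the Python requests library (both stated assumptions, not part of the record itself), that fetches the same record and prints its title, authors, and the 69 entries of the "reference" array shown above.

# Hedged sketch: fetch the Crossref work record above and summarize it.
# Assumes network access to the public Crossref REST API; field names
# ("title", "author", "container-title", "page", "reference") mirror the
# record shown in this document.
import requests

DOI = "10.1007/978-3-031-72848-8_3"  # HVCLIP chapter, as in the record above

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # same structure as the "message" object above

# Title, authors, and container come straight from the metadata fields.
title = work["title"][0]
authors = ", ".join(f'{a.get("given", "")} {a.get("family", "")}'.strip()
                    for a in work.get("author", []))
container = work.get("container-title", [""])[-1]

print(title)
print(authors)
print(container, work.get("page", ""))

# The "reference" array holds the cited works; keys such as "3_CR41"
# correspond to the chapter's numbered citations.
for ref in work.get("reference", []):
    label = ref.get("key", "")
    text = ref.get("unstructured") or ref.get("DOI", "")
    print(f"{label}: {text}")

As a usage note, the same message structure is returned for any DOI registered with Crossref, so swapping the DOI constant is enough to pull the record for any of the cited works that carry a DOI (e.g. the "DOI" fields inside the reference entries above).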