{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T15:36:29Z","timestamp":1771515389179,"version":"3.50.1"},"publisher-location":"Cham","reference-count":71,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726422","type":"print"},{"value":"9783031726439","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,22]],"date-time":"2024-11-22T00:00:00Z","timestamp":1732233600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,22]],"date-time":"2024-11-22T00:00:00Z","timestamp":1732233600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72643-9_26","type":"book-chapter","created":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T21:23:45Z","timestamp":1732224225000},"page":"440-458","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Efficient and\u00a0Versatile Robust Fine-Tuning of\u00a0Zero-Shot 
Models"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-6919-4822","authenticated-orcid":false,"given":"Sungyeon","family":"Kim","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9382-3396","authenticated-orcid":false,"given":"Boseung","family":"Jeong","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7132-4454","authenticated-orcid":false,"given":"Donghyun","family":"Kim","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4567-9091","authenticated-orcid":false,"given":"Suha","family":"Kwak","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,22]]},"reference":[{"key":"26_CR1","unstructured":"Barbu, A., et al.: Objectnet: A large-scale bias-controlled dataset for pushing the limits of object recognition models. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"26_CR2","doi-asserted-by":"crossref","unstructured":"Caesar, H., Uijlings, J., Ferrari, V.: Coco-stuff: Thing and stuff classes in context. In: Proc. IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)","DOI":"10.1109\/CVPR.2018.00132"},{"key":"26_CR3","volume-title":"Swad: Domain Generalization By Seeking Flat Minima","author":"J Cha","year":"2021","unstructured":"Cha, J., et al.: Swad: Domain Generalization By Seeking Flat Minima. Proc. Neural Information Processing Systems (NeurIPS) (2021)"},{"key":"26_CR4","doi-asserted-by":"crossref","unstructured":"Chen, G., Liu, F., Meng, Z., Liang, S.: Revisiting parameter-efficient tuning: Are we really there yet? arXiv preprint arXiv:2202.07962 (2022)","DOI":"10.18653\/v1\/2022.emnlp-main.168"},{"key":"26_CR5","unstructured":"Chen, S., et al.: Adaptformer: adapting vision transformers for scalable visual recognition. 
In: Proceedings of Neural Information Processing Systems (NeurIPS) (2022)"},{"key":"26_CR6","unstructured":"Chen, X., Fan, H., Girshick, R., He, K.: Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297 (2020)"},{"key":"26_CR7","unstructured":"Chen, X., et al.: Microsoft coco captions: Data collection and evaluation server. arXiv preprint arXiv:1504.00325 (2015)"},{"key":"26_CR8","doi-asserted-by":"crossref","unstructured":"Chen, Y.C., et al.: Uniter: universal image-text representation learning. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"26_CR9","unstructured":"Cheng, B., Schwing, A., Kirillov, A.: Per-pixel classification is not all you need for semantic segmentation. In: Advances in Neural Information Processing Systems (2021)"},{"key":"26_CR10","unstructured":"Chopra, S., Hadsell, R., LeCun, Y.: Learning a similarity metric discriminatively, with application to face verification. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, (CVPR) (2005)"},{"key":"26_CR11","unstructured":"Dai, B., Lin, D.: Contrastive learning for image captioning. In: Advances in Neural Information Processing Systems, pp. 898\u2013907 (2017)"},{"key":"26_CR12","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"26_CR13","unstructured":"Dettmers, T., Pagnoni, A., Holtzman, A., Zettlemoyer, L.: Qlora: Efficient finetuning of quantized llms. arXiv preprint arXiv:2305.14314 (2023)"},{"key":"26_CR14","doi-asserted-by":"crossref","unstructured":"Ding, J., Xue, N., Xia, G.S., Dai, D.: Decoupling zero-shot semantic segmentation. 
In: Proceeding of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.01129"},{"key":"26_CR15","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: Transformers for image recognition at scale. In: Proceeding of the International Conference on Learning Representations (ICLR) (2021)"},{"key":"26_CR16","doi-asserted-by":"crossref","unstructured":"Everingham, M., Van\u00a0Gool, L., Williams, C.K., Winn, J., Zisserman, A.: The Pascal Visual Object Classes (VOC) Challenge. In: International Journal of Computer Vision (IJCV) (2010)","DOI":"10.1007\/s11263-009-0275-4"},{"key":"26_CR17","unstructured":"Gan, Z., Chen, Y.C., Li, L., Zhu, C., Cheng, Y., Liu, J.: Large-scale adversarial training for vision-and-language representation learning. In: Advances in Neural Information Processing Systems (2020)"},{"key":"26_CR18","doi-asserted-by":"crossref","unstructured":"Ghiasi, G., Gu, X., Cui, Y., Lin, T.Y.: Scaling open-vocabulary image segmentation with image-level labels. In: Proceeding of the European Conference on Computer Vision (ECCV) (2022)","DOI":"10.1007\/978-3-031-20059-5_31"},{"key":"26_CR19","doi-asserted-by":"crossref","unstructured":"Goyal, S., Kumar, A., Garg, S., Kolter, Z., Raghunathan, A.: Finetune like you pretrain: improved finetuning of zero-shot vision models. In: Proceeding of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.01853"},{"key":"26_CR20","unstructured":"Graf, F., Hofer, C., Niethammer, M., Kwitt, R.: Dissecting supervised contrastive learning. In: International Conference on Machine Learning, pp. 3821\u20133830. PMLR (2021)"},{"key":"26_CR21","unstructured":"He, J., Zhou, C., Ma, X., Berg-Kirkpatrick, T., Neubig, G.: Towards a unified view of parameter-efficient transfer learning. 
arXiv preprint arXiv:2110.04366 (2021)"},{"key":"26_CR22","doi-asserted-by":"crossref","unstructured":"He, K., Fan, H., Wu, Y., Xie, S., Girshick, R.: Momentum contrast for unsupervised visual representation learning. In: Proceeding of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2020)","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"26_CR23","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceeding of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (June 2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"26_CR24","doi-asserted-by":"crossref","unstructured":"Hendrycks, D., et\u00a0al.: The many faces of robustness: a critical analysis of out-of-distribution generalization. In: Proceeding of the IEEE International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.00823"},{"key":"26_CR25","doi-asserted-by":"crossref","unstructured":"Hendrycks, D., Zhao, K., Basart, S., Steinhardt, J., Song, D.: Natural adversarial examples. In: Proceeding of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/CVPR46437.2021.01501"},{"key":"26_CR26","unstructured":"Houlsby, N., et al.: Parameter-efficient transfer learning for nlp. In: Proceeding of the International Conference on Machine Learning (ICML). PMLR (2019)"},{"key":"26_CR27","unstructured":"Hu, E.J., et al.: Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)"},{"key":"26_CR28","unstructured":"Izmailov, P., Podoprikhin, D., Garipov, T., Vetrov, D., Wilson, A.G.: Averaging weights leads to wider optima and better generalization (2018)"},{"key":"26_CR29","unstructured":"Jia, C., et al.: Scaling up visual and vision-language representation learning with noisy text supervision. 
In: Proceeding of the International Conference on Machine Learning (ICML) (2021)"},{"key":"26_CR30","doi-asserted-by":"publisher","unstructured":"Jia, M., et al.: Visual prompt tuning. In: Proceeding of the European Conference on Computer Vision (ECCV). Springer (2022). https:\/\/doi.org\/10.1007\/978-3-031-19827-4_41","DOI":"10.1007\/978-3-031-19827-4_41"},{"key":"26_CR31","doi-asserted-by":"crossref","unstructured":"Khattak, M.U., Rasheed, H., Maaz, M., Khan, S., Khan, F.S.: Maple: multi-modal prompt learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.01832"},{"key":"26_CR32","unstructured":"Khosla, P., et al.: Supervised contrastive learning. In: Proceedings of the Neural Information Processing Systems (NeurIPS) (2020)"},{"key":"26_CR33","unstructured":"Kim, S., Kim, D., Kwak, S.: Universal metric learning with parameter-efficient transfer learning. arXiv preprint arXiv:2309.08944 (2023)"},{"key":"26_CR34","doi-asserted-by":"crossref","unstructured":"Kim, S., Kim, D., Cho, M., Kwak, S.: Embedding transfer with label relaxation for improved metric learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2021)","DOI":"10.1109\/CVPR46437.2021.00396"},{"key":"26_CR35","unstructured":"Kumar, A., Raghunathan, A., Jones, R., Ma, T., Liang, P.: Fine-tuning can distort pretrained features and underperform out-of-distribution. arXiv preprint arXiv:2202.10054 (2022)"},{"key":"26_CR36","unstructured":"Larsson, G., Maire, M., Shakhnarovich, G.: Fractalnet: Ultra-deep neural networks without residuals. In: International Conference on Learning Representations (2016)"},{"key":"26_CR37","doi-asserted-by":"crossref","unstructured":"Lester, B., Al-Rfou, R., Constant, N.: The power of scale for parameter-efficient prompt tuning. 
In: Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"26_CR38","doi-asserted-by":"crossref","unstructured":"Li, G., Duan, N., Fang, Y., Gong, M., Jiang, D.: Unicoder-vl: a universal encoder for vision and language by cross-modal pre-training. In: Proceedings of the AAAI Conference on Artificial Intelligence (AAAI) (2020)","DOI":"10.1609\/aaai.v34i07.6795"},{"key":"26_CR39","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: Blip: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: Proceedings of the International Conference on Machine Learning (ICML). PMLR (2022)"},{"key":"26_CR40","doi-asserted-by":"crossref","unstructured":"Li, X.L., Liang, P.: Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190 (2021)","DOI":"10.18653\/v1\/2021.acl-long.353"},{"key":"26_CR41","doi-asserted-by":"crossref","unstructured":"Li, X., et\u00a0al.: Oscar: Object-semantics aligned pre-training for vision-language tasks. In: Proceedings of the European Conference on Computer Vision (ECCV) (2020)","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"26_CR42","first-page":"109","volume":"35","author":"D Lian","year":"2022","unstructured":"Lian, D., Zhou, D., Feng, J., Wang, X.: Scaling and shifting your features: A new baseline for efficient model tuning. Adv. Neural. Inf. Process. Syst. 35, 109\u2013123 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"26_CR43","doi-asserted-by":"crossref","unstructured":"Liang, F., et al.: Open-vocabulary semantic segmentation with mask-adapted clip. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.00682"},{"key":"26_CR44","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., et al.: Microsoft COCO: common objects in context. 
In: Proceedings of the European Conference on Computer Vision (ECCV) (2014)","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"26_CR45","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"26_CR46","unstructured":"Luo, G., et al.: Towards efficient visual adaption via structural re-parameterization. arXiv preprint arXiv:2302.08106 (2023)"},{"key":"26_CR47","doi-asserted-by":"crossref","unstructured":"Mottaghi, R., et al.: The role of context for object detection and semantic segmentation in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2014)","DOI":"10.1109\/CVPR.2014.119"},{"key":"26_CR48","unstructured":"Oord, A.v.d., Li, Y., Vinyals, O.: Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018)"},{"key":"26_CR49","doi-asserted-by":"crossref","unstructured":"Pfeiffer, J., Kamath, A., R\u00fcckl\u00e9, A., Cho, K., Gurevych, I.: Adapterfusion: Non-destructive task composition for transfer learning. arXiv preprint arXiv:2005.00247 (2020)","DOI":"10.18653\/v1\/2021.eacl-main.39"},{"key":"26_CR50","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: Proceedings of the International Conference on Machine Learning (ICML) (2021)"},{"key":"26_CR51","unstructured":"Rebuffi, S.A., Bilen, H., Vedaldi, A.: Learning multiple visual domains with residual adapters. In: Proceedings of the Neural Information Processing Systems (NeurIPS) (2017)"},{"key":"26_CR52","unstructured":"Recht, B., Roelofs, R., Schmidt, L., Shankar, V.: Do imagenet classifiers generalize to imagenet? In: Proceedings of the International Conference on Machine Learning (ICML). 
PMLR (2019)"},{"key":"26_CR53","doi-asserted-by":"crossref","unstructured":"Sarto, S., Barraco, M., Cornia, M., Baraldi, L., Cucchiara, R.: Positive-augmented contrastive learning for image and video captioning evaluation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6914\u20136924 (2023)","DOI":"10.1109\/CVPR52729.2023.00668"},{"key":"26_CR54","unstructured":"Shu, Y., Guo, X., Wu, J., Wang, X., Wang, J., Long, M.: Clipood: generalizing clip to out-of-distributions. In: Proceedings of the International Conference on Machine Learning (ICML) (2023)"},{"key":"26_CR55","doi-asserted-by":"crossref","unstructured":"Smith, J.S., et al.: Coda-prompt: Continual decomposed attention-based prompting for rehearsal-free continual learning. arXiv preprint arXiv:2211.13218 (2022)","DOI":"10.1109\/CVPR52729.2023.01146"},{"key":"26_CR56","unstructured":"Sohn, K.: Improved deep metric learning with multi-class n-pair loss objective. In: Proceedings of the Neural Information Processing Systems (NeurIPS) (2016)"},{"key":"26_CR57","first-page":"1929","volume":"15","author":"N Srivastava","year":"2014","unstructured":"Srivastava, N., Hinton, G., Krizhevsky, A., Sutskever, I., Salakhutdinov, R.: Dropout: a simple way to prevent neural networks from overfitting. J. Mach. Learn. Res. (JMLR) 15, 1929\u20131958 (2014)","journal-title":"J. Mach. Learn. Res. (JMLR)"},{"key":"26_CR58","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2016)","DOI":"10.1109\/CVPR.2016.308"},{"key":"26_CR59","unstructured":"Tan, M., Le, Q.: Efficientnet: rethinking model scaling for convolutional neural networks. 
In: Proceedings of the International Conference on Machine Learning (ICML) (2019)"},{"key":"26_CR60","volume-title":"Attention is all you need","author":"A Vaswani","year":"2017","unstructured":"Vaswani, A., et al.: Attention is all you need. Proc. Neural Information Processing Systems (NeurIPS) (2017)"},{"key":"26_CR61","unstructured":"Wang, H., Ge, S., Lipton, Z., Xing, E.P.: Learning robust global representations by penalizing local predictive power. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"26_CR62","doi-asserted-by":"crossref","unstructured":"Wang, Z., et al.: Learning to prompt for continual learning. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.00024"},{"key":"26_CR63","unstructured":"Wortsman, M., et\u00a0al.: Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time. In: Proceedings of the International Conference on Machine Learning (ICML) (2022)"},{"key":"26_CR64","doi-asserted-by":"crossref","unstructured":"Wortsman, M., et\u00a0al.: Robust fine-tuning of zero-shot models. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.00780"},{"key":"26_CR65","doi-asserted-by":"crossref","unstructured":"Wu, Z., Xiong, Y., Yu, S.X., Lin, D.: Unsupervised feature learning via non-parametric instance discrimination. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)","DOI":"10.1109\/CVPR.2018.00393"},{"key":"26_CR66","doi-asserted-by":"crossref","unstructured":"Xiao, Y., Tang, Z., Wei, P., Liu, C., Lin, L.: Masked images are counterfactual samples for robust fine-tuning. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.01944"},{"key":"26_CR67","doi-asserted-by":"crossref","unstructured":"Young, P., Lai, A., Hodosh, M., Hockenmaier, J.: From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics (2014)","DOI":"10.1162\/tacl_a_00166"},{"key":"26_CR68","doi-asserted-by":"crossref","unstructured":"Yu, F., et al.: Ernie-vil: knowledge enhanced vision-language representations through scene graphs. In: Proceedings of the AAAI Conference on Artificial Intelligence (AAAI) (2021)","DOI":"10.1609\/aaai.v35i4.16431"},{"key":"26_CR69","doi-asserted-by":"crossref","unstructured":"Zhou, B., et al.: Semantic understanding of scenes through the ade20k dataset. In: International Journal of Computer Vision (IJCV) (2019)","DOI":"10.1007\/s11263-018-1140-0"},{"key":"26_CR70","doi-asserted-by":"crossref","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Conditional prompt learning for vision-language models. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022)","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"26_CR71","doi-asserted-by":"crossref","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Learning to prompt for vision-language models. Int. J. Comput. 
Vision 130, 2337\u20132348 (IJCV) (2022)","DOI":"10.1007\/s11263-022-01653-1"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72643-9_26","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T21:28:44Z","timestamp":1732224524000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72643-9_26"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,22]]},"ISBN":["9783031726422","9783031726439"],"references-count":71,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72643-9_26","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,22]]},"assertion":[{"value":"22 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 
2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}