{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,26]],"date-time":"2026-02-26T14:53:19Z","timestamp":1772117599890,"version":"3.50.1"},"publisher-location":"Cham","reference-count":88,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031731150","type":"print"},{"value":"9783031731167","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T00:00:00Z","timestamp":1730332800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,31]],"date-time":"2024-10-31T00:00:00Z","timestamp":1730332800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73116-7_23","type":"book-chapter","created":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T15:15:38Z","timestamp":1730301338000},"page":"395-413","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["CLIP-DPO: Vision-Language Models as\u00a0a\u00a0Source of\u00a0Preference for\u00a0Fixing Hallucinations in\u00a0LVLMs"],"prefix":"10.1007","author":[{"given":"Yassine","family":"Ouali","sequence":"first","affiliation":[]},{"given":"Adrian","family":"Bulat","sequence":"additional","affiliation":[]},{"given":"Brais","family":"Martinez","sequence":"additional","affiliation":[]},{"given":"Georgios","family":"Tzimiropoulos","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,31]]},"reference":[{"key":"23_CR1","unstructured":"Detailed caption dataset. https:\/\/huggingface.co\/datasets\/echo840\/Detailed_Caption (2024)"},{"key":"23_CR2","unstructured":"Achiam, J., et\u00a0al.: GPT-4 technical report. arXiv preprint arXiv:2303.08774 (2023)"},{"key":"23_CR3","unstructured":"Awadalla, A., et\u00a0al.: OpenFlamingo: an open-source framework for training large autoregressive vision-language models. arXiv preprint arXiv:2308.01390 (2023)"},{"key":"23_CR4","unstructured":"Azar, M.G., et al.: A general theoretical paradigm to understand learning from human preferences. arXiv preprint arXiv:2310.12036 (2023)"},{"key":"23_CR5","unstructured":"Bai, J., et al.: Qwen-VL: a versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966 (2023)"},{"key":"23_CR6","doi-asserted-by":"crossref","unstructured":"Bossard, L., Guillaumin, M., Van\u00a0Gool, L.: Food-101\u2013mining discriminative components with random forests. In: European Conference on Computer Vision (2014)","DOI":"10.1007\/978-3-319-10599-4_29"},{"key":"23_CR7","unstructured":"Brown, T., et\u00a0al.: Language models are few-shot learners. In: Advances on Neural Information Processing Systems (2020)"},{"key":"23_CR8","unstructured":"Chen, J., et al.: MiniGPT-v2: large language model as a unified interface for vision-language multi-task learning. arXiv preprint arXiv:2310.09478 (2023)"},{"key":"23_CR9","unstructured":"Chen, K., Zhang, Z., Zeng, W., Zhang, R., Zhu, F., Zhao, R.: Shikra: unleashing multimodal LLM\u2019s referential dialogue magic. 
arXiv preprint arXiv:2306.15195 (2023)"},{"key":"23_CR10","doi-asserted-by":"crossref","unstructured":"Chen, L., et al.: ShareGPT4v: improving large multi-modal models with better captions. arXiv preprint arXiv:2311.12793 (2023)","DOI":"10.1007\/978-3-031-72643-9_22"},{"key":"23_CR11","doi-asserted-by":"crossref","unstructured":"Chen, Z., et\u00a0al.: InternVL: scaling up vision foundation models and aligning for generic visual-linguistic tasks. arXiv preprint arXiv:2312.14238 (2023)","DOI":"10.1109\/CVPR52733.2024.02283"},{"key":"23_CR12","unstructured":"Chiang, W.L., et\u00a0al.: Vicuna: an open-source chatbot impressing GPT-4 with 90%* chatgpt quality. See https:\/\/vicuna.lmsys.org (2023). Accessed 14 Apr. 2023"},{"key":"23_CR13","unstructured":"Chu, X., et\u00a0al.: MobileVLM: a fast, reproducible and strong vision language assistant for mobile devices. arXiv preprint arXiv:2312.16886 (2023)"},{"key":"23_CR14","unstructured":"Chu, X., et\u00a0al.: MobileVLM V2: faster and stronger baseline for vision language model. arXiv preprint arXiv:2402.03766 (2024)"},{"key":"23_CR15","unstructured":"Chung, H.W., et\u00a0al.: Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416 (2022)"},{"key":"23_CR16","unstructured":"Dai, W., et al.: InstructBLIP: towards general-purpose vision-language models with instruction tuning. arXiv preprint arXiv:2305.06500 (2023)"},{"key":"23_CR17","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: IEEE Conference on Computer Vision and Pattern Recognition (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"23_CR18","unstructured":"Ethayarajh, K., Xu, W., Muennighoff, N., Jurafsky, D., Kiela, D.: KTO: model alignment as prospect theoretic optimization. arXiv preprint arXiv:2402.01306 (2024)"},{"key":"23_CR19","unstructured":"Fang, A., Jose, A.M., Jain, A., Schmidt, L., Toshev, A., Shankar, V.: Data filtering networks. arXiv preprint arXiv:2309.17425 (2023)"},{"key":"23_CR20","unstructured":"Fei-Fei, L., Fergus, R., Perona, P.: Learning generative visual models from few training examples: an incremental Bayesian approach tested on 101 object categories. In: IEEE Conference on Computer Vision and Pattern Recognition - Workshops (2004)"},{"key":"23_CR21","unstructured":"Fu, C., et\u00a0al.: MME: a comprehensive evaluation benchmark for multimodal large language models. arXiv preprint arXiv:2306.13394 (2023)"},{"key":"23_CR22","doi-asserted-by":"crossref","unstructured":"Gunjal, A., Yin, J., Bas, E.: Detecting and preventing hallucinations in large vision language models. arXiv preprint arXiv:2308.06394 (2023)","DOI":"10.1609\/aaai.v38i16.29771"},{"issue":"7","key":"23_CR23","doi-asserted-by":"publisher","first-page":"2217","DOI":"10.1109\/JSTARS.2019.2918242","volume":"12","author":"P Helber","year":"2019","unstructured":"Helber, P., Bischke, B., Dengel, A., Borth, D.: EuroSAT: a novel dataset and deep learning benchmark for land use and land cover classification. IEEE J. Sel. Top. Appl. Earth Observations Remote Sens. 12(7), 2217\u20132226 (2019)","journal-title":"IEEE J. Sel. Top. Appl. Earth Observations Remote Sens."},{"key":"23_CR24","unstructured":"Hu, E.J., et al.: LoRA: low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)"},{"key":"23_CR25","unstructured":"Hu, H., Zhang, J., Zhao, M., Sun, Z.: CIEM: contrastive instruction evaluation method for better instruction tuning. 
arXiv preprint arXiv:2309.02301 (2023)"},{"key":"23_CR26","doi-asserted-by":"crossref","unstructured":"Hudson, D.A., Manning, C.D.: GQA: a new dataset for real-world visual reasoning and compositional question answering. In: IEEE Conference on Computer Vision and Pattern Recognition (2019)","DOI":"10.1109\/CVPR.2019.00686"},{"key":"23_CR27","doi-asserted-by":"crossref","unstructured":"Jain, J., Yang, J., Shi, H.: VCoder: versatile vision encoders for multimodal large language models. arXiv preprint arXiv:2312.14233 (2023)","DOI":"10.1109\/CVPR52733.2024.02644"},{"key":"23_CR28","unstructured":"Jiang, A.Q., et\u00a0al.: Mistral 7B. arXiv preprint arXiv:2310.06825 (2023)"},{"key":"23_CR29","doi-asserted-by":"crossref","unstructured":"Jiang, C., et al.: Hallucination augmented contrastive learning for multimodal large language model. arXiv preprint arXiv:2312.06968 (2023)","DOI":"10.1109\/CVPR52733.2024.02553"},{"key":"23_CR30","doi-asserted-by":"crossref","unstructured":"Jing, L., Li, R., Chen, Y., Jia, M., Du, X.: FaithScore: evaluating hallucinations in large vision-language models. arXiv preprint arXiv:2311.01477 (2023)","DOI":"10.18653\/v1\/2024.findings-emnlp.290"},{"key":"23_CR31","unstructured":"Koh, J.Y., Salakhutdinov, R., Fried, D.: Grounding language models to images for multimodal inputs and outputs. International Conference on Machine Learning (2023)"},{"key":"23_CR32","doi-asserted-by":"crossref","unstructured":"Krause, J., Stark, M., Deng, J., Fei-Fei, L.: 3D object representations for fine-grained categorization. In: IEEE International Conference on Computer Vision - Workshops (2013)","DOI":"10.1109\/ICCVW.2013.77"},{"key":"23_CR33","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597 (2023)"},{"key":"23_CR34","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: BLIP: bootstrapping language-image pre-training for unified vision-language understanding and generation. In: International Conference on Learning Representations (2022)"},{"key":"23_CR35","unstructured":"Li, L., et al.: Silkie: preference distillation for large visual language models. arXiv preprint arXiv:2312.10665 (2023)"},{"key":"23_CR36","doi-asserted-by":"crossref","unstructured":"Li, S., Lin, R., Pei, S.: Multi-modal preference alignment remedies regression of visual instruction tuning on language model. arXiv preprint arXiv:2402.10884 (2024)","DOI":"10.18653\/v1\/2024.acl-long.765"},{"key":"23_CR37","doi-asserted-by":"crossref","unstructured":"Li, Y., Du, Y., Zhou, K., Wang, J., Zhao, W.X., Wen, J.R.: Evaluating object hallucination in large vision-language models. arXiv preprint arXiv:2305.10355 (2023)","DOI":"10.18653\/v1\/2023.emnlp-main.20"},{"key":"23_CR38","unstructured":"Li, Y., Bubeck, S., Eldan, R., Del\u00a0Giorno, A., Gunasekar, S., Lee, Y.T.: Textbooks are all you need ii: phi-1.5 technical report. arXiv preprint arXiv:2309.05463 (2023)"},{"key":"23_CR39","unstructured":"Lin, B., et al.: MoE-LLaVA: mixture of experts for large vision-language models. arXiv preprint arXiv:2401.15947 (2024)"},{"key":"23_CR40","unstructured":"Liu, F., Lin, K., Li, L., Wang, J., Yacoob, Y., Wang, L.: Mitigating hallucination in large multi-modal models via robust instruction tuning. 
In: International Conference on Learning Representations (2023)"},{"key":"23_CR41","doi-asserted-by":"crossref","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning. arXiv preprint arXiv:2310.03744 (2023)","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"23_CR42","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. In: Advances on Neural Information Processing Systems (2024)"},{"key":"23_CR43","unstructured":"Liu, T., Zhao, Y., Joshi, R., Khalman, M., Saleh, M., Liu, P.J., Liu, J.: Statistical rejection sampling improves preference optimization. arXiv preprint arXiv:2309.06657 (2023)"},{"key":"23_CR44","doi-asserted-by":"crossref","unstructured":"Liu, Y., et\u00a0al.: MMBench: is your multi-modal model an all-around player? ArXiv preprint arXiv:2307.06281 (2023)","DOI":"10.1007\/978-3-031-72658-3_13"},{"key":"23_CR45","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101 (2017)"},{"key":"23_CR46","unstructured":"Lu, P., et al.: Learn to Explain: multimodal reasoning via thought chains for science question answering. In: Advances in Neural Information Processing Systems (2022)"},{"key":"23_CR47","doi-asserted-by":"crossref","unstructured":"Mishra, S., Khashabi, D., Baral, C., Hajishirzi, H.: Cross-task generalization via natural language crowdsourcing instructions. arXiv preprint arXiv:2104.08773 (2021)","DOI":"10.18653\/v1\/2022.acl-long.244"},{"key":"23_CR48","unstructured":"Mitchell, E.: A note on DPO with noisy preferences & relationship to IPO (2024)"},{"key":"23_CR49","doi-asserted-by":"crossref","unstructured":"Nilsback, M.E., Zisserman, A.: Automated flower classification over a large number of classes. In: Indian Conference on Computer Vision, Graphics & Image Processing, pp. 722\u2013729 (2008)","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"23_CR50","unstructured":"OpenAI: Introducing ChatGPT (2022)"},{"key":"23_CR51","unstructured":"OpenAI: GPT-4V(ision) system card (2023)"},{"key":"23_CR52","unstructured":"Ouyang, L., et\u00a0al.: Training language models to follow instructions with human feedback. In: Advances in Neural Information Processing Systems (2022)"},{"key":"23_CR53","doi-asserted-by":"crossref","unstructured":"Parkhi, O.M., Vedaldi, A., Zisserman, A., Jawahar, C.: Cats and dogs. In: IEEE Conference on Computer Vision and Pattern Recognition (2012)","DOI":"10.1109\/CVPR.2012.6248092"},{"key":"23_CR54","unstructured":"Paszke, A., et al.: Automatic differentiation in PyTorch (2017)"},{"key":"23_CR55","unstructured":"Peng, Z., et al.: Kosmos-2: grounding multimodal large language models to the world. arXiv preprint arXiv:2306.14824 (2023)"},{"key":"23_CR56","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning (2021)"},{"key":"23_CR57","unstructured":"Rafailov, R., Sharma, A., Mitchell, E., Manning, C.D., Ermon, S., Finn, C.: Direct preference optimization: your language model is secretly a reward model. In: Advances on Neural Information Processing Systems (2024)"},{"key":"23_CR58","doi-asserted-by":"crossref","unstructured":"Rajbhandari, S., Ruwase, O., Rasley, J., Smith, S., He, Y.: Zero-Infinity: breaking the GPU memory wall for extreme scale deep learning. 
In: Proceedings of the International Conference for High Performance Computing, Networking, Storage and Analysis (2021)","DOI":"10.1145\/3458817.3476205"},{"key":"23_CR59","doi-asserted-by":"crossref","unstructured":"Rasley, J., Rajbhandari, S., Ruwase, O., He, Y.: DeepSpeed: system optimizations enable training deep learning models with over 100 billion parameters. In: Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (2020)","DOI":"10.1145\/3394486.3406703"},{"key":"23_CR60","unstructured":"Schuhmann, C., et\u00a0al.: LAION-5B: an open large-scale dataset for training next generation image-text models. In: Advances in Neural Information Processing Systems (2022)"},{"key":"23_CR61","unstructured":"Schulman, J., Wolski, F., Dhariwal, P., Radford, A., Klimov, O.: Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347 (2017)"},{"key":"23_CR62","doi-asserted-by":"crossref","unstructured":"Singh, A., et al.: Towards VQA models that can read. In: IEEE Conference on Computer Vision and Pattern Recognition (2019)","DOI":"10.1109\/CVPR.2019.00851"},{"key":"23_CR63","unstructured":"Soomro, K., Zamir, A.R., Shah, M.: Ucf101: a dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)"},{"key":"23_CR64","unstructured":"Stiennon, N., et al.: Learning to summarize with human feedback. In: Advances in Neural Information Processing Systems (2020)"},{"key":"23_CR65","doi-asserted-by":"crossref","unstructured":"Sun, Z., et\u00a0al.: Aligning large multimodal models with factually augmented RLHF. arXiv preprint arXiv:2309.14525 (2023)","DOI":"10.18653\/v1\/2024.findings-acl.775"},{"key":"23_CR66","unstructured":"Team, G., et\u00a0al.: Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023)"},{"key":"23_CR67","unstructured":"Thoppilan, R., et\u00a0al.: LaMDA: language models for dialog applications. arXiv preprint arXiv:2201.08239 (2022)"},{"key":"23_CR68","unstructured":"Touvron, H., et\u00a0al.: LLaMA: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"23_CR69","unstructured":"Touvron, H., et\u00a0al.: LLaMA 2: open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288 (2023)"},{"key":"23_CR70","unstructured":"Wang, J., et al.: GIT: a generative image-to-text transformer for vision and language. arXiv preprint arXiv:2205.14100 (2022)"},{"key":"23_CR71","unstructured":"Wang, J., et al.: An LLM-free multi-dimensional benchmark for MLLMs hallucination evaluation. arXiv preprint arXiv:2311.07397 (2023)"},{"key":"23_CR72","unstructured":"Wang, J., et\u00a0al.: Evaluation and analysis of hallucination in large vision-language models. arXiv preprint arXiv:2308.15126 (2023)"},{"key":"23_CR73","doi-asserted-by":"publisher","first-page":"229","DOI":"10.1007\/BF00992696","volume":"8","author":"RJ Williams","year":"1992","unstructured":"Williams, R.J.: Simple statistical gradient-following algorithms for connectionist reinforcement learning. Mach. Learn. 8, 229\u2013256 (1992)","journal-title":"Mach. Learn."},{"key":"23_CR74","doi-asserted-by":"crossref","unstructured":"Xiao, J., Hays, J., Ehinger, K.A., Oliva, A., Torralba, A.: Sun Database: large-scale scene recognition from abbey to zoo. 
In: IEEE Conference on Computer Vision and Pattern Recognition (2010)","DOI":"10.1109\/CVPR.2010.5539970"},{"key":"23_CR75","unstructured":"Xu, J., Lee, A., Sukhbaatar, S., Weston, J.: Some things are more cringe than others: Preference optimization with the pairwise cringe loss. arXiv preprint arXiv:2312.16682 (2023)"},{"key":"23_CR76","unstructured":"Ye, Q., et\u00a0al.: mPLUG-Owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178 (2023)"},{"key":"23_CR77","unstructured":"You, H., et al.: Ferret: refer and ground anything anywhere at any granularity. arXiv preprint arXiv:2310.07704 (2023)"},{"key":"23_CR78","unstructured":"Zhai, B., et al.: HallE-Switch: rethinking and controlling object existence hallucinations in large vision language models for detailed caption. arXiv preprint arXiv:2310.01779 (2023)"},{"key":"23_CR79","doi-asserted-by":"crossref","unstructured":"Zhai, X., Mustafa, B., Kolesnikov, A., Beyer, L.: Sigmoid loss for language image pre-training. arXiv preprint arXiv:2303.15343 (2023)","DOI":"10.1109\/ICCV51070.2023.01100"},{"key":"23_CR80","unstructured":"Zhang, S., et\u00a0al.: OPT: open pre-trained transformer language models. arXiv preprint arXiv:2205.01068 (2022)"},{"key":"23_CR81","unstructured":"Zhao, H., et al.: MMICL: empowering vision-language model with multi-modal in-context learning. arXiv preprint arXiv:2309.07915 (2023)"},{"key":"23_CR82","unstructured":"Zhao, Y., Joshi, R., Liu, T., Khalman, M., Saleh, M., Liu, P.J.: SLiC-HF: sequence likelihood calibration with human feedback. arXiv preprint arXiv:2305.10425 (2023)"},{"key":"23_CR83","unstructured":"Zhao, Z., Wang, B., Ouyang, L., Dong, X., Wang, J., He, C.: Beyond hallucinations: enhancing LVLMs through hallucination-aware direct preference optimization. arXiv preprint arXiv:2311.16839 (2023)"},{"key":"23_CR84","unstructured":"Zhou, C., et\u00a0al.: LIMA: less is more for alignment. In: Advances in Neural Information Processing Systems (2024)"},{"key":"23_CR85","doi-asserted-by":"crossref","unstructured":"Zhou, K., Yang, J., Loy, C.C., Liu, Z.: Conditional prompt learning for vision-language models. In: IEEE Conference on Computer Vision and Pattern Recognition (2022)","DOI":"10.1109\/CVPR52688.2022.01631"},{"key":"23_CR86","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: MiniGPT-4: enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592 (2023)"},{"key":"23_CR87","doi-asserted-by":"crossref","unstructured":"Zhu, Y., Zhu, M., Liu, N., Ou, Z., Mou, X., Tang, J.: LLaVA-phi: efficient multi-modal assistant with small language model. arXiv preprint arXiv:2401.02330 (2024)","DOI":"10.1145\/3688863.3689575"},{"key":"23_CR88","unstructured":"Ziegler, D.M., et al.: Fine-tuning language models from human preferences. 
arXiv preprint arXiv:1909.08593 (2019)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73116-7_23","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,30]],"date-time":"2024-11-30T14:17:20Z","timestamp":1732976240000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73116-7_23"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,31]]},"ISBN":["9783031731150","9783031731167"],"references-count":88,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73116-7_23","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,31]]},"assertion":[{"value":"31 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
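
For anyone who wants to re-fetch or post-process a record like the one above, the sketch below shows one way to pull it from Crossref's public REST API (https://api.crossref.org/works/{DOI}) and read a few of its fields. The DOI and all field names are taken directly from the record; the standard-library-only approach and the User-Agent string are illustrative assumptions, and a live response may differ from this snapshot since Crossref's index is continuously updated.

    import json
    import urllib.request

    # DOI of the chapter, taken from the record above.
    DOI = "10.1007/978-3-031-73116-7_23"

    # Crossref's public works endpoint. A descriptive User-Agent with a
    # contact address is recommended etiquette for the API; the address
    # here is a placeholder, not a real contact.
    req = urllib.request.Request(
        f"https://api.crossref.org/works/{DOI}",
        headers={"User-Agent": "example-client/0.1 (mailto:you@example.org)"},
    )

    with urllib.request.urlopen(req) as resp:
        record = json.load(resp)

    # The envelope mirrors the snapshot above: status / message-type / message.
    assert record["status"] == "ok" and record["message-type"] == "work"
    work = record["message"]

    print(work["title"][0])
    print(", ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))
    print("DOI:", work["DOI"], "| pages:", work["page"])
    print("references:", work["references-count"],
          "| cited by:", work["is-referenced-by-count"])

Note that volatile fields such as "is-referenced-by-count" and "indexed" will drift over time, so a fresh fetch should not be expected to match this snapshot byte for byte.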