{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T23:06:46Z","timestamp":1774480006537,"version":"3.50.1"},"publisher-location":"Cham","reference-count":73,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031729034","type":"print"},{"value":"9783031729041","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T00:00:00Z","timestamp":1732147200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,21]],"date-time":"2024-11-21T00:00:00Z","timestamp":1732147200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72904-1_9","type":"book-chapter","created":{"date-parts":[[2024,11,20]],"date-time":"2024-11-20T13:28:32Z","timestamp":1732109312000},"page":"143-160","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":29,"title":["A Comprehensive Study of\u00a0Multimodal Large Language Models for\u00a0Image Quality Assessment"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0009-6889-1907","authenticated-orcid":false,"given":"Tianhe","family":"Wu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8608-1128","authenticated-orcid":false,"given":"Kede","family":"Ma","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2822-5466","authenticated-orcid":false,"given":"Jie","family":"Liang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6427-1024","authenticated-orcid":false,"given":"Yujiu","family":"Yang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2078-4215","authenticated-orcid":false,"given":"Lei","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,21]]},"reference":[{"key":"9_CR1","doi-asserted-by":"crossref","unstructured":"Achananuparp, P., Hu, X., Shen, X.: The evaluation of sentence similarity measures. In: Data Warehousing and Knowledge Discovery, pp. 305\u2013316 (2008)","DOI":"10.1007\/978-3-540-85836-2_29"},{"key":"9_CR2","unstructured":"Achiam, J., et al.: GPT-4 technical report. arXiv preprint arXiv:2303.08774 (2023)"},{"key":"9_CR3","unstructured":"Alayrac, J.B., et al.: flamingo: a visual language model for few-shot learning. In: Advances in Neural Information Processing Systems, vol. 35, pp. 23716\u201323736 (2022)"},{"key":"9_CR4","unstructured":"Bai, J., et al.: Qwen-VL: a versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966 (2023)"},{"key":"9_CR5","doi-asserted-by":"crossref","unstructured":"Bracci, S., Mraz, J., Zeman, A., Leys, G., Op\u00a0de Beeck, H.: The representational hierarchy in human and artificial visual systems in the presence of object-scene regularities. PLOS Comput. Biol. 19(4), 1\u20135 (2023)","DOI":"10.1371\/journal.pcbi.1011086"},{"key":"9_CR6","unstructured":"Brown, T., et al.: Language models are few-shot learners. In: Advances in Neural Information Processing Systems, vol. 33, pp. 1877\u20131901 (2020)"},{"key":"9_CR7","unstructured":"Cao, P., Li, D., Ma, K.: Image quality assessment: integrating model-centric and data-centric approaches. In: Conference on Parsimony and Learning, pp. 529\u2013541 (2024)"},{"key":"9_CR8","doi-asserted-by":"crossref","unstructured":"Chen, C., et al.: TOPIQ: a top-down approach from semantics to distortions for image quality assessment. arXiv preprint arXiv:2308.03060 (2023)","DOI":"10.1109\/TIP.2024.3378466"},{"key":"9_CR9","doi-asserted-by":"crossref","unstructured":"Chen, H., Wang, Z., Yang, Y., Sun, Q., Ma, K.: Learning a deep color difference metric for photographic images. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 22242\u201322251 (2023)","DOI":"10.1109\/CVPR52729.2023.02130"},{"key":"9_CR10","doi-asserted-by":"crossref","unstructured":"Chubarau, A., Akhavan, T., Yoo, H., Mantiuk, R.K., Clark, J.: Perceptual image quality assessment for various viewing conditions and display systems. In: Image Quality and System Performance, pp.\u00a01\u20139 (2020)","DOI":"10.2352\/ISSN.2470-1173.2020.9.IQSP-067"},{"issue":"5","key":"9_CR11","first-page":"2567","volume":"44","author":"K Ding","year":"2020","unstructured":"Ding, K., Ma, K., Wang, S., Simoncelli, E.P.: Image quality assessment: unifying structure and texture similarity. IEEE Trans. Pattern Anal. Mach. Intell. 44(5), 2567\u20132581 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"9_CR12","unstructured":"Dong, Q., et al.: A survey for in-context learning. arXiv preprint arXiv:2301.00234 (2022)"},{"key":"9_CR13","unstructured":"Dong, X., et al.: InternLM-XComposer2: mastering free-form text-image composition and comprehension in vision-language large model. arXiv preprint arXiv:2401.16420 (2024)"},{"key":"9_CR14","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. In: International Conference on Learning Representations (2020)"},{"key":"9_CR15","doi-asserted-by":"crossref","unstructured":"Fang, Y., Zhu, H., Zeng, Y., Ma, K., Wang, Z.: Perceptual quality assessment of smartphone photography. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3677\u20133686 (2020)","DOI":"10.1109\/CVPR42600.2020.00373"},{"key":"9_CR16","unstructured":"Gao, P., et al.: LLaMA-Adapter V2: parameter-efficient visual instruction model. arXiv preprint arXiv:2304.15010 (2023)"},{"key":"9_CR17","unstructured":"Guo, Q., et al.: Connecting large language models with evolutionary algorithms yields powerful prompt optimizers. In: International Conference on Learning Representations (2024)"},{"key":"9_CR18","unstructured":"Hu, E.J., et al.: LoRA: Low-rank adaptation of large language models. In: International Conference on Learning Representations (2022)"},{"key":"9_CR19","unstructured":"Kaplan, J., et al.: Scaling laws for neural language models. arXiv preprint arXiv:2001.08361 (2020)"},{"key":"9_CR20","doi-asserted-by":"crossref","unstructured":"Ke, J., Wang, Q., Wang, Y., Milanfar, P., Yang, F.: MUSIQ: multi-scale image quality transformer. In: IEEE\/CVF International Conference on Computer Vision, pp. 5148\u20135157 (2021)","DOI":"10.1109\/ICCV48922.2021.00510"},{"key":"9_CR21","doi-asserted-by":"crossref","unstructured":"Kewenig, V., et al.: Multimodality and attention increase alignment in natural language prediction between humans and computational models. arXiv preprint arXiv:2308.06035 (2024)","DOI":"10.21203\/rs.3.rs-3913308\/v1"},{"key":"9_CR22","doi-asserted-by":"crossref","unstructured":"Lao, S., et al.: Attentions help CNNs see better: attention-based hybrid image quality assessment network. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshop, pp. 1140\u20131149 (2022)","DOI":"10.1109\/CVPRW56347.2022.00123"},{"key":"9_CR23","doi-asserted-by":"crossref","unstructured":"Li, C., et al.: AGIQA-3K: an open database for AI-generated image quality assessment. arXiv preprint arXiv:2306.04717 (2023)","DOI":"10.1109\/TCSVT.2023.3319020"},{"key":"9_CR24","unstructured":"Li, J., Li, D., Savarese, S., Hoi, S.: BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In: International Conference on Machine Learning, pp. 19730\u201319742 (2023)"},{"key":"9_CR25","doi-asserted-by":"crossref","unstructured":"Li, X.L., Liang, P.: Prefix-tuning: optimizing continuous prompts for generation. In: Association for Computational Linguistics and International Joint Conference on Natural Language Processing, pp. 4582\u20134597 (2021)","DOI":"10.18653\/v1\/2021.acl-long.353"},{"issue":"8","key":"9_CR26","doi-asserted-by":"publisher","first-page":"1138","DOI":"10.1109\/TKDE.2006.130","volume":"18","author":"Y Li","year":"2006","unstructured":"Li, Y., McLean, D., Bandar, Z.A., O\u2019shea, J.D., Crockett, K.: Sentence similarity based on semantic nets and corpus statistics. IEEE Trans. Knowl. Data Eng. 18(8), 1138\u20131150 (2006)","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"9_CR27","doi-asserted-by":"crossref","unstructured":"Liang, Z., Li, C., Zhou, S., Feng, R., Loy, C.C.: Iterative prompt learning for unsupervised backlit image enhancement. In: IEEE\/CVF International Conference on Computer Vision, pp. 8094\u20138103 (2023)","DOI":"10.1109\/ICCV51070.2023.00743"},{"key":"9_CR28","doi-asserted-by":"crossref","unstructured":"Lin, H., Hosu, V., Saupe, D.: KADID-10k: a large-scale artificially distorted IQA database. In: International Conference on Quality of Multimedia Experience, pp. 1\u20133 (2019)","DOI":"10.1109\/QoMEX.2019.8743252"},{"key":"9_CR29","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. In: Advances in Neural Information Processing Systems, vol.\u00a036, pp. 1\u201325 (2024)"},{"key":"9_CR30","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"9_CR31","doi-asserted-by":"crossref","unstructured":"Ma, K., Duanmu, Z., Wang, Z.: Geometric transformation invariant image quality assessment using convolutional neural networks. In: IEEE International Conference on Acoustics, Speech and Signal Processing, pp. 6732\u20136736 (2018)","DOI":"10.1109\/ICASSP.2018.8462176"},{"issue":"3","key":"9_CR32","doi-asserted-by":"publisher","first-page":"1202","DOI":"10.1109\/TIP.2017.2774045","volume":"27","author":"K Ma","year":"2017","unstructured":"Ma, K., Liu, W., Zhang, K., Duanmu, Z., Wang, Z., Zuo, W.: End-to-end blind image quality assessment using deep neural networks. IEEE Trans. Image Process. 27(3), 1202\u20131213 (2017)","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR33","doi-asserted-by":"crossref","unstructured":"Ma, K., et al.: Group MAD competition-a new methodology to compare objective image quality models. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1664\u20131673 (2016)","DOI":"10.1109\/CVPR.2016.184"},{"key":"9_CR34","doi-asserted-by":"crossref","unstructured":"Mittal, A., Soundararajan, R., Bovik, A.C.: Making a \u201ccompletely blind\u201d image quality analyzer. IEEE Sign. Process. Lett. 20(3), 209\u2013212 (2012)","DOI":"10.1109\/LSP.2012.2227726"},{"key":"9_CR35","unstructured":"Ngo, R., Chan, L., Mindermann, S.: The alignment problem from a deep learning perspective. In: International Conference on Learning Representations (2022)"},{"key":"9_CR36","unstructured":"Ouyang, L., et al.: Training language models to follow instructions with human feedback. In: Advances in Neural Information Processing Systems, vol. 35, pp. 27730\u201327744 (2022)"},{"key":"9_CR37","doi-asserted-by":"crossref","unstructured":"Papernot, N., McDaniel, P., Goodfellow, I., Jha, S., Celik, Z.B., Swami, A.: Practical black-box attacks against machine learning. In: ACM Asia Conference on Computer and Communications Security, pp. 506\u2013519 (2017)","DOI":"10.1145\/3052973.3053009"},{"key":"9_CR38","unstructured":"Peng, Z., et al.: KOSMOS-2: grounding multimodal large language models to the world. arXiv preprint arXiv:2306.14824 (2023)"},{"key":"9_CR39","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763 (2021)"},{"issue":"2","key":"9_CR40","doi-asserted-by":"publisher","first-page":"430","DOI":"10.1109\/TIP.2005.859378","volume":"15","author":"HR Sheikh","year":"2006","unstructured":"Sheikh, H.R., Bovik, A.C.: Image information and visual quality. IEEE Trans. Image Process. 15(2), 430\u2013444 (2006)","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR41","doi-asserted-by":"crossref","unstructured":"Shin, S., et al.: On the effect of pretraining corpora on in-context learning by a large-scale language model. In: The North American Chapter of the Association for Computational Linguistics, pp. 5168\u20135186 (2022)","DOI":"10.18653\/v1\/2022.naacl-main.380"},{"key":"9_CR42","unstructured":"Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. In: International Conference on Learning Representations (2014)"},{"key":"9_CR43","unstructured":"Team, G., et al.: Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023)"},{"key":"9_CR44","doi-asserted-by":"publisher","first-page":"273","DOI":"10.1037\/h0070288","volume":"34","author":"LL Thurstone","year":"1927","unstructured":"Thurstone, L.L.: A law of comparative judgment. Psychol. Rev. 34, 273\u2013286 (1927)","journal-title":"Psychol. Rev."},{"key":"9_CR45","unstructured":"Tong, S., et al.: Cambrian-1: a fully open, vision-centric exploration of multimodal LLMs. arXiv preprint arXiv:2406.16860 (2024)"},{"key":"9_CR46","doi-asserted-by":"crossref","unstructured":"Topiwala, P., Dai, W., Pian, J., Biondi, K., Krovvidi, A.: VMAF and variants: towards a unified VQA. In: Applications of Digital Image Processing, vol. 11842, pp. 96\u2013104 (2021)","DOI":"10.1117\/12.2594772"},{"key":"9_CR47","unstructured":"Touvron, H., et al.: LLaMA: open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"},{"key":"9_CR48","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30, pp. 5998\u20136008 (2017)"},{"issue":"8","key":"9_CR49","doi-asserted-by":"publisher","first-page":"10114","DOI":"10.1109\/TPAMI.2023.3262424","volume":"45","author":"Z Wang","year":"2023","unstructured":"Wang, Z., et al.: Measuring perceptual color differences of smartphone photographs. IEEE Trans. Pattern Anal. Mach. Intell. 45(8), 10114\u201310128 (2023)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"4","key":"9_CR50","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE Trans. Image Process. 13(4), 600\u2013612 (2004)","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR51","unstructured":"Wei, J., et al.: Finetuned language models are zero-shot learners. In: International Conference on Learning Representations (2022)"},{"key":"9_CR52","unstructured":"Wei, J., et al.: Chain-of-thought prompting elicits reasoning in large language models. In: Advances in Neural Information Processing Systems, vol. 35, pp. 24824\u201324837 (2022)"},{"key":"9_CR53","unstructured":"Wu, H., et al.: Q-Bench: a benchmark for general-purpose foundation models on low-level vision. In: International Conference on Learning Representations (2024)"},{"key":"9_CR54","doi-asserted-by":"crossref","unstructured":"Wu, H., et al.: Q-Instruct: improving low-level visual abilities for multi-modality foundation models. arXiv preprint arXiv:2311.06783 (2023)","DOI":"10.1109\/CVPR52733.2024.02408"},{"key":"9_CR55","unstructured":"Wu, H., et al.: Q-Align: teaching LMMs for visual scoring via discrete text-defined levels. arXiv preprint arXiv:2312.17090 (2023)"},{"key":"9_CR56","unstructured":"Wu, H., et al.: Towards open-ended visual quality comparison. arXiv preprint arXiv:2402.16641 (2024)"},{"key":"9_CR57","unstructured":"Wu, T., et al.: Assessor360: multi-sequence network for blind omnidirectional image quality assessment. In: Advances in Neural Information Processing Systems, vol. 36, pp. 1\u201314 (2024)"},{"key":"9_CR58","doi-asserted-by":"crossref","unstructured":"Yang, S., et al.: MANIQA: multi-dimension attention network for no-reference image quality assessment. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshop, pp. 1191\u20131200 (2022)","DOI":"10.1109\/CVPRW56347.2022.00126"},{"key":"9_CR59","unstructured":"Yang, Z., et al.: The dawn of LMMs: preliminary explorations with GPT-4V(ision). arXiv preprint arXiv:2309.17421 (2023)"},{"key":"9_CR60","doi-asserted-by":"crossref","unstructured":"Ye, P., Doermann, D.: Active sampling for subjective image quality assessment. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4249\u20134256 (2014)","DOI":"10.1109\/CVPR.2014.541"},{"key":"9_CR61","doi-asserted-by":"crossref","unstructured":"Ye, P., Kumar, J., Kang, L., Doermann, D.: Unsupervised feature learning framework for no-reference image quality assessment. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1098\u20131105 (2012)","DOI":"10.1109\/CVPR.2012.6247789"},{"key":"9_CR62","doi-asserted-by":"crossref","unstructured":"Ye, Q., et al.: mPLUG-Owl2: revolutionizing multi-modal large language model with modality collaboration. arXiv preprint arXiv:2311.04257 (2023)","DOI":"10.1109\/CVPR52733.2024.01239"},{"key":"9_CR63","unstructured":"Yin, S., et al.: A survey on multimodal large language models. arXiv preprint arXiv:2306.13549 (2023)"},{"key":"9_CR64","doi-asserted-by":"crossref","unstructured":"Ying, Z., Niu, H., Gupta, P., Mahajan, D., Ghadiyaram, D., Bovik, A.: From patches to pictures (PaQ-2-PiQ): mapping the perceptual space of picture quality. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3575\u20133585 (2020)","DOI":"10.1109\/CVPR42600.2020.00363"},{"key":"9_CR65","unstructured":"You, Z., et al.: Descriptive image quality assessment in the wild. arXiv preprint arXiv:2405.18842 (2024)"},{"key":"9_CR66","doi-asserted-by":"crossref","unstructured":"You, Z., Li, Z., Gu, J., Yin, Z., Xue, T., Dong, C.: Depicting beyond scores: advancing image quality assessment through multi-modal language models. arXiv preprint arXiv:2312.08962 (2023)","DOI":"10.1007\/978-3-031-72970-6_15"},{"issue":"8","key":"9_CR67","doi-asserted-by":"publisher","first-page":"2378","DOI":"10.1109\/TIP.2011.2109730","volume":"20","author":"L Zhang","year":"2011","unstructured":"Zhang, L., Zhang, L., Mou, X., Zhang, D.: FSIM: a feature similarity index for image quality assessment. IEEE Trans. Image Process. 20(8), 2378\u20132386 (2011)","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR68","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"9_CR69","doi-asserted-by":"publisher","first-page":"3474","DOI":"10.1109\/TIP.2021.3061932","volume":"30","author":"W Zhang","year":"2021","unstructured":"Zhang, W., Ma, K., Zhai, G., Yang, X.: Uncertainty-aware blind image quality assessment in the laboratory and wild. IEEE Trans. Image Process. 30, 3474\u20133486 (2021)","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR70","doi-asserted-by":"crossref","unstructured":"Zhang, W., Zhai, G., Wei, Y., Yang, X., Ma, K.: Blind image quality assessment via vision-language correspondence: a multitask learning perspective. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14071\u201314081 (2023)","DOI":"10.1109\/CVPR52729.2023.01352"},{"key":"9_CR71","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: MiniGPT-4: enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592 (2023)"},{"key":"9_CR72","doi-asserted-by":"crossref","unstructured":"Zhu, H., et al.: 2AFC prompting of large multimodal models for image quality assessment. arXiv preprint arXiv:2402.01162 (2024)","DOI":"10.1109\/TCSVT.2024.3434999"},{"key":"9_CR73","unstructured":"Zhuang, S., Hadfield-Menell, D.: Consequences of misaligned AI. In: Advances in Neural Information Processing Systems, vol. 33, pp. 15763\u201315773 (2020)"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72904-1_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,15]],"date-time":"2025-03-15T19:51:47Z","timestamp":1742068307000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72904-1_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,21]]},"ISBN":["9783031729034","9783031729041"],"references-count":73,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72904-1_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,21]]},"assertion":[{"value":"21 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}