{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T16:36:22Z","timestamp":1778085382171,"version":"3.51.4"},"publisher-location":"Cham","reference-count":68,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726453","type":"print"},{"value":"9783031726460","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T00:00:00Z","timestamp":1730073600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T00:00:00Z","timestamp":1730073600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72646-0_21","type":"book-chapter","created":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T08:45:29Z","timestamp":1730105129000},"page":"360-377","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":30,"title":["Towards Open-Ended Visual Quality Comparison"],"prefix":"10.1007","author":[{"given":"Haoning","family":"Wu","sequence":"first","affiliation":[]},{"given":"Hanwei","family":"Zhu","sequence":"additional","affiliation":[]},{"given":"Zicheng","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Erli","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Chaofeng","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Liao","sequence":"additional","affiliation":[]},{"given":"Chunyi","family":"Li","sequence":"additional","affiliation":[]},{"given":"Annan","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Wenxiu","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Qiong","family":"Yan","sequence":"additional","affiliation":[]},{"given":"Xiaohong","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Guangtao","family":"Zhai","sequence":"additional","affiliation":[]},{"given":"Shiqi","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Weisi","family":"Lin","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,28]]},"reference":[{"key":"21_CR1","doi-asserted-by":"crossref","unstructured":"Antol, S., Agrawal, A., Lu, J., Mitchell, M., Batra, D., Zitnick, C.L., Parikh, D.: VQA: visual question answering. In: IEEE ICCV, pp. 2425\u20132433 (2015)","DOI":"10.1109\/ICCV.2015.279"},{"key":"21_CR2","doi-asserted-by":"crossref","unstructured":"Chen, L., et al.: ShareGPT4V: improving large multi-modal models with better captions. CoRR arxiv:2311.12793 (2023)","DOI":"10.1007\/978-3-031-72643-9_22"},{"key":"21_CR3","unstructured":"Chen, X., et al.: Microsoft COCO captions: data collection and evaluation server. CoRR arxiv:1504.00325 (2015)"},{"key":"21_CR4","unstructured":"Dai, W., et al.: InstructBLIP: towards general-purpose vision-language models with instruction tuning. CoRR arxiv:2305.06500 (2023)"},{"key":"21_CR5","unstructured":"Dong, X., et al.: InternLM-XComposer2: mastering free-form text-image composition and comprehension in vision-language large model. 
CoRR arxiv:2401.16420 (2024)"},{"key":"21_CR6","doi-asserted-by":"crossref","unstructured":"Fang, Y., Zhu, H., Zeng, Y., Ma, K., Wang, Z.: Perceptual quality assessment of smartphone photography. In: IEEE CVPR, pp. 3677\u20133686 (2020)","DOI":"10.1109\/CVPR42600.2020.00373"},{"issue":"1","key":"21_CR7","first-page":"372","volume":"25","author":"D Ghadiyaram","year":"2016","unstructured":"Ghadiyaram, D., Bovik, A.C.: Massive online crowdsourced study of subjective and objective picture quality. IEEE TIP 25(1), 372\u2013387 (2016)","journal-title":"IEEE TIP"},{"key":"21_CR8","doi-asserted-by":"crossref","unstructured":"Golestaneh, S.A., Dadsetan, S., Kitani, K.M.: No-reference image quality assessment via transformers, relative ranking, and self-consistency. In: IEEE WACV, pp. 3209\u20133218 (2022)","DOI":"10.1109\/WACV51458.2022.00404"},{"key":"21_CR9","unstructured":"Google: Gemini Pro (2023). https:\/\/deepmind.google\/technologies\/gemini"},{"key":"21_CR10","doi-asserted-by":"crossref","unstructured":"Gu, J., Cai, H., Chen, H., Ye, X., Ren, J., Dong, C.: PIPAL: a large-scale image quality assessment dataset for perceptual image restoration. In: ECCV, pp. 633\u2013651 (2020)","DOI":"10.1007\/978-3-030-58621-8_37"},{"key":"21_CR11","unstructured":"Hendrycks, D., et al.: Measuring massive multitask language understanding. In: ICLR, pp. 1\u201310 (2021)"},{"key":"21_CR12","first-page":"4041","volume":"29","author":"V Hosu","year":"2020","unstructured":"Hosu, V., Lin, H., Sziranyi, T., Saupe, D.: Koniq-10k: an ecologically valid database for deep learning of blind image quality assessment. IEEE TIP 29, 4041\u20134056 (2020)","journal-title":"IEEE TIP"},{"key":"21_CR13","doi-asserted-by":"crossref","unstructured":"Hudson, D.A., Manning, C.D.: GQA: a new dataset for real-world visual reasoning and compositional question answering. In: IEEE CVPR, pp. 6700\u20136709 (2019)","DOI":"10.1109\/CVPR.2019.00686"},{"key":"21_CR14","unstructured":"ITU-R, B.T.: Methodology for the subjective assessment of the quality of television pictures (2002). https:\/\/www.itu.int\/rec\/R-REC-BT.500"},{"key":"21_CR15","doi-asserted-by":"crossref","unstructured":"Jayaraman, D., Mittal, A., Moorthy, A.K., Bovik, A.C.: Objective quality assessment of multiply distorted images. In: ASILOMAR, pp. 1693\u20131697 (2012)","DOI":"10.1109\/ACSSC.2012.6489321"},{"key":"21_CR16","unstructured":"Jiang, A.Q., et al.: Mixtral of experts. CoRR arxiv:2401.04088 (2024)"},{"key":"21_CR17","unstructured":"LAION: LAION GPT-4V dataset (2023). https:\/\/huggingface.co\/datasets\/laion\/gpt4v-dataset"},{"key":"21_CR18","unstructured":"Li, C., et al.: AGIQA-3K: an open database for ai-generated image quality assessment. CoRR arxiv:2306.04717 (2023)"},{"key":"21_CR19","doi-asserted-by":"crossref","unstructured":"Li, D., Jiang, T., Jiang, M.: Quality assessment of in-the-wild videos. In: ACM MM, pp. 2351\u20132359 (2019)","DOI":"10.1145\/3343031.3351028"},{"issue":"5","key":"21_CR20","first-page":"1221","volume":"21","author":"D Li","year":"2019","unstructured":"Li, D., Jiang, T., Lin, W., Jiang, M.: Which has better visual quality: the clear blue sky or a blurry animal? IEEE TMM 21(5), 1221\u20131234 (2019)","journal-title":"IEEE TMM"},{"key":"21_CR21","unstructured":"Li, J., Mantiuk, R., Wang, J., Ling, S., Le\u00a0Callet, P.: Hybrid-MST: a hybrid active sampling strategy for pairwise preference aggregation. In: NeurIPS, pp. 
1\u201311 (2018)"},{"key":"21_CR22","doi-asserted-by":"crossref","unstructured":"Li, Y., Wang, S., Zhang, X., Wang, S., Ma, S., Wang, Y.: Quality assessment of end-to-end learned image compression: the benchmark and objective measure. In: ACM MM, pp. 4297\u20134305 (2021)","DOI":"10.1145\/3474085.3475569"},{"key":"21_CR23","doi-asserted-by":"crossref","unstructured":"Lin, H., Hosu, V., Saupe, D.: KADID-10k: a large-scale artificially distorted iqa database. In: QoMEX, pp.\u00a01\u20133 (2019)","DOI":"10.1109\/QoMEX.2019.8743252"},{"key":"21_CR24","unstructured":"Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning. CoRR arxiv:2310.03744 (2023)"},{"key":"21_CR25","unstructured":"Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning. CoRR arxiv:2304.08485 (2023)"},{"key":"21_CR26","doi-asserted-by":"crossref","unstructured":"Liu, X., Van De\u00a0Weijer, J., Bagdanov, A.D.: RankIQA: learning from rankings for no-reference image quality assessment. In: IEEE ICCV, pp. 1040\u20131049 (2017)","DOI":"10.1109\/ICCV.2017.118"},{"issue":"8","key":"21_CR27","doi-asserted-by":"publisher","first-page":"1862","DOI":"10.1109\/TPAMI.2019.2899857","volume":"41","author":"X Liu","year":"2019","unstructured":"Liu, X., Van De Weijer, J., Bagdanov, A.D.: Exploiting unlabeled data in cnns by self-supervised learning to rank. IEEE TPAMI 41(8), 1862\u20131878 (2019)","journal-title":"IEEE TPAMI"},{"key":"21_CR28","unstructured":"Liu, Y., et al.: MMBench: is your multi-modal model an all-around player? CoRR arxiv:2307.06281 (2023)"},{"issue":"8","key":"21_CR29","first-page":"3951","volume":"26","author":"K Ma","year":"2017","unstructured":"Ma, K., Liu, W., Liu, T., Wang, Z., Tao, D.: dipIQ: Blind image quality assessment by learning-to-rank discriminable image pairs. IEEE TIP 26(8), 3951\u20133964 (2017)","journal-title":"IEEE TIP"},{"key":"21_CR30","doi-asserted-by":"crossref","unstructured":"Mantiuk, R.K., Tomaszewska, A., Mantiuk, R.: Comparison of four subjective methods for image quality assessment. In: Computer Graphics Forum, vol.\u00a031, pp. 2478\u20132491 (2012)","DOI":"10.1111\/j.1467-8659.2012.03188.x"},{"key":"21_CR31","unstructured":"Michaelis, C., et al.: Benchmarking robustness in object detection: autonomous driving when winter is coming. CoRR arxiv:1907.07484 (2019)"},{"key":"21_CR32","doi-asserted-by":"crossref","unstructured":"Mikhailiuk, A., Wilmot, C., Perez-Ortiz, M., Yue, D., Mantiuk, R.K.: Active sampling for pairwise comparisons via approximate message passing and information gain maximization. In: IEEE ICPR, pp. 2559\u20132566 (2021)","DOI":"10.1109\/ICPR48806.2021.9412676"},{"key":"21_CR33","unstructured":"OpenAI: GPT-4technical report (2023)"},{"key":"21_CR34","doi-asserted-by":"crossref","unstructured":"Prashnani, E., Cai, H., Mostofi, Y., Sen, P.: PieAPP: perceptual image-error assessment through pairwise preference. In: IEEE CVPR, pp. 1808\u20131817 (2018)","DOI":"10.1109\/CVPR.2018.00194"},{"key":"21_CR35","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: ICML, pp. 8748\u20138763 (2021)"},{"key":"21_CR36","unstructured":"Rajkumar, A., Agarwal, S.: When can we rank well from comparisons of o (nlog (n)) non-actively chosen pairs? In: Conference on Learning Theory, pp. 
1376\u20131401 (2016)"},{"key":"21_CR37","doi-asserted-by":"publisher","unstructured":"Schwenk, D., Khandelwal, A., Clark, C., Marino, K., Mottaghi, R.: A-OKVQA: a benchmark for visual question answering using world knowledge. In: ECCV 2022, pp. 146\u2013162. Springer, Heidelberg (2022). https:\/\/doi.org\/10.1007\/978-3-031-20074-8_9","DOI":"10.1007\/978-3-031-20074-8_9"},{"key":"21_CR38","unstructured":"SkunkworksAI: BakLLaVA (2024). https:\/\/github.com\/SkunkworksAI\/BakLLaVA"},{"key":"21_CR39","unstructured":"Sun, Q., et\u00a0al.: Generative multimodal models are in-context learners. CoRR arxiv:2312.13286 (2023)"},{"issue":"2","key":"21_CR40","doi-asserted-by":"publisher","first-page":"64","DOI":"10.1145\/2812802","volume":"59","author":"B Thomee","year":"2016","unstructured":"Thomee, B., et al.: YFCC100M: the new data in multimedia research. Commun. ACM 59(2), 64\u201373 (2016)","journal-title":"Commun. ACM"},{"key":"21_CR41","unstructured":"Touvron, H., et al.: Llama 2: open foundation and fine-tuned chat models. CoRR arxiv:2307.09288 (2023)"},{"key":"21_CR42","unstructured":"Tsukida, K., Gupta, M.R.: How to analyze paired comparison data (Technical Report UWEETR-2011-0004, University of Washington, 2011) (2011). https:\/\/api.semanticscholar.org\/CorpusID:15425240"},{"key":"21_CR43","unstructured":"Wang, L., Yang, N., Huang, X., Yang, L., Majumder, R., Wei, F.: Improving text embeddings with large language models. CoRR arxiv:2401.00368 (2024)"},{"key":"21_CR44","unstructured":"Wauthier, F., Jordan, M., Jojic, N.: Efficient ranking from pairwise comparisons. In: ICML, pp. 109\u2013117 (2013)"},{"key":"21_CR45","doi-asserted-by":"crossref","unstructured":"Wu, H., et al.: FAST-VQA: efficient end-to-end video quality assessment with fragment sampling. In: ECCV, pp. 538\u2013554 (2022)","DOI":"10.1007\/978-3-031-20068-7_31"},{"key":"21_CR46","doi-asserted-by":"crossref","unstructured":"Wu, H., et al.: Neighbourhood representative sampling for efficient end-to-end video quality assessment. IEEE TPAMI (2023)","DOI":"10.1109\/TPAMI.2023.3319332"},{"key":"21_CR47","doi-asserted-by":"crossref","unstructured":"Wu, H., et al.: Exploring video quality assessment on user generated contents from aesthetic and technical perspectives. In: IEEE ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.01843"},{"key":"21_CR48","unstructured":"Wu, H., et al.: Q-Bench: a benchmark for general-purpose foundation models on low-level vision. In: ICLR (2024)"},{"key":"21_CR49","doi-asserted-by":"crossref","unstructured":"Wu, H., et\u00a0al.: Q-Instruct: improving low-level visual abilities for multi-modality foundation models. In: IEEE CVPR, pp. 1\u201316 (2024)","DOI":"10.1109\/CVPR52733.2024.02408"},{"key":"21_CR50","unstructured":"Wu, H., et\u00a0al.: Q-align: teaching lmms for visual scoring via discrete text-defined levels. CoRR arxiv:2312.17090 (2023)"},{"key":"21_CR51","doi-asserted-by":"crossref","unstructured":"Wu, X., Sun, K., Zhu, F., Zhao, R., Li, H.: Better aligning text-to-image models with human preference. CoRR arxiv:2303.14420 (2023)","DOI":"10.1109\/ICCV51070.2023.00200"},{"key":"21_CR52","unstructured":"Xu, J., et al.: ImageReward: learning and evaluating human preferences for text-to-image generation. CoRR arxiv:2304.05977 (2023)"},{"key":"21_CR53","doi-asserted-by":"crossref","unstructured":"Ye, P., Doermann, D.: Active sampling for subjective image quality assessment. In: IEEE CVPR, pp. 
4249\u20134256 (2014)","DOI":"10.1109\/CVPR.2014.541"},{"key":"21_CR54","unstructured":"Ye, Q., et al.: mPLUG-Owl: modularization empowers large language models with multimodality. CoRR arxiv:2304.14178 (2023)"},{"key":"21_CR55","doi-asserted-by":"crossref","unstructured":"Ye, Q., et al.: mPLUG-Owl2: revolutionizing multi-modal large language model with modality collaboration. CoRR arxiv:2311.04257 (2023)","DOI":"10.1109\/CVPR52733.2024.01239"},{"key":"21_CR56","doi-asserted-by":"crossref","unstructured":"Yim, J.G., Wang, Y., Birkbeck, N., Adsumilli, B.: Subjective quality assessment for youtube UGC dataset. In: IEEE ICIP, pp.\u00a01\u20135 (2020)","DOI":"10.1109\/ICIP40778.2020.9191194"},{"key":"21_CR57","unstructured":"Yin, Z., et\u00a0al.: LAMM: language-assisted multi-modal instruction-tuning dataset, framework, and benchmark. CoRR arxiv:abs\/2306.06687 (2023)"},{"key":"21_CR58","doi-asserted-by":"crossref","unstructured":"Ying, Z., Niu, H., Gupta, P., Mahajan, D., Ghadiyaram, D., Bovik, A.: From patches to pictures (PaQ-2-PiQ): mapping the perceptual space of picture quality. In: IEEE CVPR, pp. 3575\u20133585 (2020)","DOI":"10.1109\/CVPR42600.2020.00363"},{"key":"21_CR59","unstructured":"Yue, X., et\u00a0al.: MMMU: a massive multi-discipline multimodal understanding and reasoning benchmark for expert AGI. CoRR arxiv:2311.16502 (2023)"},{"key":"21_CR60","doi-asserted-by":"crossref","unstructured":"Zhang, C., Su, S., Zhu, Y., Yan, Q., Sun, J., Zhang, Y.: Exploring and evaluating image restoration potential in dynamic scenes. In: IEEE CVPR, pp. 2057\u20132066 (2022)","DOI":"10.1109\/CVPR52688.2022.00211"},{"key":"21_CR61","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: IEEE CVPR, pp. 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"21_CR62","first-page":"3474","volume":"30","author":"W Zhang","year":"2021","unstructured":"Zhang, W., Ma, K., Zhai, G., Yang, X.: Uncertainty-aware blind image quality assessment in the laboratory and wild. IEEE TIP 30, 3474\u20133486 (2021)","journal-title":"IEEE TIP"},{"key":"21_CR63","doi-asserted-by":"crossref","unstructured":"Zhang, W., Zhai, G., Wei, Y., Yang, X., Ma, K.: Blind image quality assessment via vision-language correspondence: a multitask learning perspective. In: IEEE CVPR, pp. 14071\u201314081 (2023)","DOI":"10.1109\/CVPR52729.2023.01352"},{"key":"21_CR64","doi-asserted-by":"crossref","unstructured":"Zhang, W., Liu, Y., Dong, C., Qiao, Y.: RankSRGAN: generative adversarial networks with ranker for image super-resolution. In: IEEE ICCV, pp. 3096\u20133105 (2019)","DOI":"10.1109\/ICCV.2019.00319"},{"issue":"4","key":"21_CR65","first-page":"1","volume":"20","author":"Z Zhang","year":"2023","unstructured":"Zhang, Z., et al.: Subjective and objective quality assessment for in-the-wild computer graphics images. ACM TOMM 20(4), 1\u201322 (2023)","journal-title":"ACM TOMM"},{"key":"21_CR66","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Wu, H., Zhang, E., Zhai, G., Lin, W.: A benchmark for multi-modal foundation models on low-level vision: from single images to pairs. CoRR arxiv:2402.07116 (2024)","DOI":"10.1109\/TPAMI.2024.3445770"},{"key":"21_CR67","unstructured":"Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: MiniGPT-4: enhancing vision-language understanding with advanced large language models. 
CoRR arxiv:2304.10592 (2023)"},{"key":"21_CR68","doi-asserted-by":"crossref","unstructured":"Zhu, H., et al.: 2AFC prompting of large multimodal models for image quality assessment. CoRR arxiv:2402.01162 (2024)","DOI":"10.1109\/TCSVT.2024.3434999"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72646-0_21","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,30]],"date-time":"2024-11-30T10:14:25Z","timestamp":1732961665000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72646-0_21"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,28]]},"ISBN":["9783031726453","9783031726460"],"references-count":68,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72646-0_21","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,28]]},"assertion":[{"value":"28 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}