{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:33:05Z","timestamp":1775230385115,"version":"3.50.1"},"reference-count":109,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"RGC Early Career Scheme","award":["24209224"],"award-info":[{"award-number":["24209224"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276251"],"award-info":[{"award-number":["62276251"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"Joint Laboratory of CAS-HK","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. 
on Image Process."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tip.2025.3639998","type":"journal-article","created":{"date-parts":[[2025,12,9]],"date-time":"2025-12-09T18:35:41Z","timestamp":1765305341000},"page":"8201-8215","source":"Crossref","is-referenced-by-count":2,"title":["Enhancing Descriptive Image Quality Assessment With a Large-Scale Multi-Modal Dataset"],"prefix":"10.1109","volume":"34","author":[{"ORCID":"https:\/\/orcid.org\/0009-0006-8546-3478","authenticated-orcid":false,"given":"Zhiyuan","family":"You","sequence":"first","affiliation":[{"name":"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4389-6236","authenticated-orcid":false,"given":"Jinjin","family":"Gu","sequence":"additional","affiliation":[{"name":"INSAIT, Sofia University &#x201C;St. Kliment Ohridski&#x201D;, Sofia, Bulgaria"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-9121-6900","authenticated-orcid":false,"given":"Xin","family":"Cai","sequence":"additional","affiliation":[{"name":"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong, China"}]},{"given":"Zheyuan","family":"Li","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences, Shenzhen Institutes of Advanced Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-5195-5707","authenticated-orcid":false,"given":"Kaiwen","family":"Zhu","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2260-8079","authenticated-orcid":false,"given":"Chao","family":"Dong","sequence":"additional","affiliation":[{"name":"Chinese Academy of Sciences, Shenzhen Institutes of Advanced Technology, Shenzhen, China"}]},{"given":"Tianfan","family":"Xue","sequence":"additional","affiliation":[{"name":"Multimedia Laboratory, The Chinese University of Hong Kong, Hong Kong, 
China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"34892","article-title":"Visual instruction tuning","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref2","volume-title":"GPT-4V(Ision) System Card","year":"2023"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01239"},{"key":"ref4","article-title":"Q-bench: A benchmark for general-purpose foundation models on low-level vision","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Wu"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02408"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72646-0_21"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72904-1_9"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72970-6_15"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.image.2014.10.009"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00068"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00510"},{"key":"ref12","first-page":"633","article-title":"PIPAL: A large-scale image quality assessment dataset for perceptual image restoration","volume-title":"Proc. Eur. Conf. Comput. Vis. 
(ECCV)","author":"Gu"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2005.859378"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2003.819861"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2011.2109730"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00194"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2760518"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00576"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2020.3045810"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475419"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19797-0_6"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20221"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547899"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2016.12.009"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2012.2214050"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2012.2227726"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2010.2043888"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2011.2147325"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2012.2191563"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2011.5995446"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.224"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.118"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00667"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00372"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3152942"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01008"},{"key":"ref37","doi-asserted-by":"publisher","DOI
":"10.1109\/CVPR42600.2020.01415"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25353"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00126"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3178874"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01352"},{"key":"ref42","volume-title":"Vicuna: An Open-Source Chatbot Impressing GPT-4 With 90% ChatGPT Quality","author":"Chiang et al","year":"2023"},{"key":"ref43","article-title":"GPT-4 technical report","volume-title":"arXiv:2303.08774","author":"Achiam","year":"2023"},{"key":"ref44","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023","journal-title":"arXiv:2302.13971"},{"key":"ref45","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Alayrac"},{"key":"ref46","first-page":"49250","article-title":"InstructBLIP: Towards general-purpose vision-language models with instruction tuning","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Dai"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73235-5_23"},{"key":"ref48","article-title":"MPLUG-owl: Modularization empowers large language models with multimodality","author":"Ye","year":"2023","journal-title":"arXiv:2304.14178"},{"key":"ref49","first-page":"26650","article-title":"LAMM: Language-assisted multi-modal instruction-tuning dataset, framework, and benchmark","volume-title":"Proc. Annu. Conf. Neural Inf. Process. 
Syst.","author":"Yin"},{"key":"ref50","article-title":"InternLM-XComposer: A vision-language large model for advanced text-image comprehension and composition","author":"Zhang","year":"2023","journal-title":"arXiv:2309.15112"},{"key":"ref51","first-page":"1","article-title":"LLaMA-adapter: Efficient fine-tuning of large language models with zero-initialized attention","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhang"},{"key":"ref52","article-title":"MiniGPT-4: Enhancing vision-language understanding with advanced large language models","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Zhu"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00904"},{"key":"ref54","article-title":"Microsoft COCO captions: Data collection and evaluation server","author":"Chen","year":"2015","journal-title":"arXiv:1504.00325"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00166"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72658-3_13"},{"key":"ref58","first-page":"2507","article-title":"Learn to explain: Multimodal reasoning via thought chains for science question answering","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Lu"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.177"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00225"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00851"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3434999"},{"key":"ref63","first-page":"54015","article-title":"Q-align: Teaching LMMs for visual scoring via discrete text-defined levels","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Wu"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.01350"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-025-09422-z"},{"key":"ref66","first-page":"1","article-title":"Q-insight: Understanding image quality via visual reinforcement learning","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Li"},{"key":"ref67","article-title":"DeepSeekMath: Pushing the limits of mathematical reasoning in open language models","author":"Shao","year":"2024","journal-title":"arXiv:2402.03300"},{"key":"ref68","article-title":"VisualQuality-r1: Reasoning-induced image quality assessment via reinforcement learning to rank","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Wu"},{"key":"ref69","article-title":"Q-ponder: A unified training pipeline for reasoning-based visual quality assessment","author":"Cai","year":"2025","journal-title":"arXiv:2506.05384"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2019.12.012"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00084"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i14.17533"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.5244\/C.34.146"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1038\/s42256-020-0212-3"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2020.101654"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00344"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/ICMLA.2017.0-140"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01866"},{"key":"ref79","article-title":"DeepFL-IQA: Weak supervision for deep IQA feature 
learning","author":"Lin","year":"2020","journal-title":"arXiv:2001.08113"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/QoMEX.2019.8743252"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/ACSSC.2012.6489321"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2016.07.033"},{"key":"ref83","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1016\/j.visres.2005.06.038"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1117\/12.477378"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1167\/9.10.1"},{"key":"ref87","doi-asserted-by":"crossref","DOI":"10.1007\/978-1-84882-491-1","volume":"39","author":"Hyv\u00e4rinen","year":"2009","journal-title":"Natural Image Statistics: A Probabilistic Approach to Early Computational Vision"},{"key":"ref88","volume-title":"LLaVA-NeXT: Improved Reasoning, OCR, and World Knowledge","author":"Liu et al","year":"2024"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/d18-2012"},{"key":"ref90","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. 
Represent.","author":"Hu"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.276"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1410"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/p17-1099"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref95","article-title":"LLaVA-OneVision-1.5: Fully open framework for democratized multimodal training","author":"An","year":"2025","journal-title":"arXiv:2509.23661"},{"key":"ref96","article-title":"Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling","author":"Chen","year":"2024","journal-title":"arXiv:2412.05271"},{"key":"ref97","article-title":"Qwen2.5-VL technical report","volume-title":"arXiv:2502.13923","author":"Bai","year":"2025"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.2967829"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00373"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW54120.2021.00210"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3434999"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02142"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73209-6_5"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01458"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00564"},{"key":"ref106","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00495"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00568"},{"key":"ref108","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01582"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00207"}],"container-title":["IEEE Transactions on Image 
Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/83\/10795784\/11291112.pdf?arnumber=11291112","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,23]],"date-time":"2026-01-23T21:01:44Z","timestamp":1769202104000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11291112\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":109,"URL":"https:\/\/doi.org\/10.1109\/tip.2025.3639998","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"value":"1057-7149","type":"print"},{"value":"1941-0042","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}