{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T18:10:32Z","timestamp":1775067032434,"version":"3.50.1"},"reference-count":105,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276203"],"award-info":[{"award-number":["62276203"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62036007"],"award-info":[{"award-number":["62036007"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. on Image Process."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tip.2025.3639984","type":"journal-article","created":{"date-parts":[[2025,12,9]],"date-time":"2025-12-09T18:35:41Z","timestamp":1765305341000},"page":"8216-8228","source":"Crossref","is-referenced-by-count":1,"title":["AI-Generated Image Quality Assessment Based on Task-Specific Prompt and Multi-Granularity Similarity"],"prefix":"10.1109","volume":"34","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8326-9360","authenticated-orcid":false,"given":"Jili","family":"Xia","sequence":"first","affiliation":[{"name":"School of Electronic Engineering, Xidian University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0555-3574","authenticated-orcid":false,"given":"Lihuo","family":"He","sequence":"additional","affiliation":[{"name":"School of Electronic Engineering, Xidian University, Xi&#x2019;an, China"}]},{"given":"Cheng","family":"Deng","sequence":"additional","affiliation":[{"name":"School of Electronic Engineering, Xidian University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9069-8796","authenticated-orcid":false,"given":"Leida","family":"Li","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Xidian University, Xi&#x2019;an, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7985-0037","authenticated-orcid":false,"given":"Xinbo","family":"Gao","sequence":"additional","affiliation":[{"name":"School of Electronic Engineering, Xidian University, Xi&#x2019;an, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.629"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00143"},{"key":"ref3","first-page":"19822","article-title":"Cogview: Mastering text-to-image generation via transformers","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Ding","year":"2021"},{"key":"ref4","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ramesh"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00192"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2006.881959"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1117\/1.3267105"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/QoMEX.2019.8743252"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2015.2500021"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.2967829"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00373"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00363"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2012.2214050"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.224"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00404"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00566"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00257"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICMEW59549.2023.00082"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3319020"},{"key":"ref21","first-page":"15903","article-title":"ImageReward: Learning and evaluating human preferences for text-to-image generation","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Xu"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00200"},{"key":"ref23","first-page":"36652","article-title":"Pick-a-pic: An open dataset of user preferences for text-to-image generation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kirstain"},{"key":"ref24","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW63382.2024.00644"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW63382.2024.00621"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2024.3420083"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICME57554.2024.10688254"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW63382.2024.00641"},{"key":"ref30","first-page":"106","article-title":"Color image database TID2013: Peculiarities and preliminary results","volume-title":"Proc. Eur. Workshop Vis. Inf. Process.","author":"Ponomarenko"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2012.2191563"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2013.2271356"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2015.2426416"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2016.2585880"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2017.2710419"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2017.2763321"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.3001537"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00372"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547982"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-024-02338-7"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP42928.2021.9506075"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW53098.2021.00054"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW54120.2021.00222"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00126"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2018.2875354"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.3002478"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3149534"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00133"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2018.2886771"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3188991"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/BMSB55706.2022.9828590"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2023.3270621"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3243683"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3303725"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3055197"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3181496"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3375344"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3325719"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3152942"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3225728"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.118"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3114551"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2024.3397051"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01415"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2021.3073410"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3190700"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3178874"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3443414"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2024.3521705"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3369699"},{"key":"ref71","first-page":"9613","article-title":"FILIP: Fine-grained interactive language-image pre-training","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Yao"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01069"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01760"},{"key":"ref74","first-page":"23033","article-title":"SegCLIP: Patch aggregation with learnable centers for open-vocabulary semantic segmentation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Luo"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01629"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25353"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01352"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3341611"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72904-1_9"},{"key":"ref80","first-page":"54015","article-title":"Q-align: Teaching LMMs for visual scoring via discrete text-defined levels","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wu"},{"key":"ref81","first-page":"4029","article-title":"Q-bench: A benchmark for general-purpose foundation models on low-level vision","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Wu"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02408"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72970-6_15"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.01350"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-99-9119-8_5"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW63382.2024.00636"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3509032"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW63382.2024.00663"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/TBC.2024.3391060"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681471"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.346"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/WACV57701.2024.00556"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3362062"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.301"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3052490"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2020.2984883"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.111795"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/VCIP53242.2021.9675430"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2760518"},{"key":"ref101","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014","journal-title":"arXiv:1409.1556"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00510"},{"key":"ref103","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","author":"Jia"},{"key":"ref104","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref105","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01519"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/83\/10795784\/11291162.pdf?arnumber=11291162","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,6]],"date-time":"2026-01-06T18:38:01Z","timestamp":1767724681000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11291162\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":105,"URL":"https:\/\/doi.org\/10.1109\/tip.2025.3639984","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"value":"1057-7149","type":"print"},{"value":"1941-0042","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}