{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,6]],"date-time":"2026-04-06T10:20:49Z","timestamp":1775470849425,"version":"3.50.1"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,7,15]],"date-time":"2024-07-15T00:00:00Z","timestamp":1721001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,7,15]],"date-time":"2024-07-15T00:00:00Z","timestamp":1721001600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100017607","name":"Shenzhen Fundamental Research Program","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100017607","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,7,15]]},"DOI":"10.1109\/icme57554.2024.10688254","type":"proceedings-article","created":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T17:24:16Z","timestamp":1727717056000},"page":"1-6","source":"Crossref","is-referenced-by-count":31,"title":["Bringing Textual Prompt to AI-Generated Image Quality Assessment"],"prefix":"10.1109","author":[{"given":"Bowen","family":"Qu","sequence":"first","affiliation":[{"name":"Peking University,SECE,Shenzhen,China"}]},{"given":"Haohui","family":"Li","sequence":"additional","affiliation":[{"name":"Peking University,SECE,Shenzhen,China"}]},{"given":"Wei","family":"Gao","sequence":"additional","affiliation":[{"name":"Peking University,SECE,Shenzhen,China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Generative adversarial nets","volume-title":"NeurIPS","volume":"27","author":"Goodfellow"},{"key":"ref2","first-page":"19822","article-title":"Cogview: Mastering text-to-image generation via transformers","volume-title":"NeurIPS","volume":"34","author":"Ding"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00372"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2018.2886771"},{"key":"ref6","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"ICML","author":"Radford"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.51"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICMEW59549.2023.00082"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/tcsvt.2023.3319020"},{"key":"ref10","article-title":"Improved techniques for training gans","volume-title":"NeurIPS","volume":"29","author":"Salimans"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2014.224"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25353"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01352"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413804"},{"key":"ref16","article-title":"No-reference quality assessment of contrast-distorted images using contrast enhancement","author":"Yan","year":"2019"},{"issue":"3","key":"ref17","first-page":"209","article-title":"Making a \u201ccompletely blind\u201d image quality analyzer","volume":"20","author":"Mittal","year":"2012","journal-title":"IEEE SPL"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/QOMEX.2009.5246972"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TBC.2014.2344471"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2014.2355716"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2017.2713945"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref23","article-title":"Imagereward: Learning and evaluating human preferences for text-to-image generation","author":"Xu","year":"2023"},{"key":"ref24","article-title":"Better aligning text-to-image models with human preference","author":"Wu","year":"2023"}],"event":{"name":"2024 IEEE International Conference on Multimedia and Expo (ICME)","location":"Niagara Falls, ON, Canada","start":{"date-parts":[[2024,7,15]]},"end":{"date-parts":[[2024,7,19]]}},"container-title":["2024 IEEE International Conference on Multimedia and Expo (ICME)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10685847\/10687354\/10688254.pdf?arnumber=10688254","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T06:25:30Z","timestamp":1727763930000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10688254\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7,15]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icme57554.2024.10688254","relation":{},"subject":[],"published":{"date-parts":[[2024,7,15]]}}}