{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T06:15:21Z","timestamp":1774419321696,"version":"3.50.1"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100007957","name":"Chongqing Municipal Education Commission","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100007957","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005230","name":"Natural Science Foundation of Chongqing","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100005230","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,4,6]]},"DOI":"10.1109\/icassp49660.2025.10890774","type":"proceedings-article","created":{"date-parts":[[2025,3,12]],"date-time":"2025-03-12T17:15:02Z","timestamp":1741799702000},"page":"1-5","source":"Crossref","is-referenced-by-count":0,"title":["AGIAA-2K: A Fine-grained Dataset for Aesthetic and Alignment Evaluation of AI-Generated Images"],"prefix":"10.1109","author":[{"given":"Bo","family":"Hu","sequence":"first","affiliation":[{"name":"Chongqing University of Posts and Telecommunications,Key Laboratory of Image Cognition,Chongqing,China"}]},{"given":"Nanxiang","family":"Li","sequence":"additional","affiliation":[{"name":"Chongqing 
University of Posts and Telecommunications,Key Laboratory of Image Cognition,Chongqing,China"}]},{"given":"Lihuo","family":"He","sequence":"additional","affiliation":[{"name":"Xidian University,School of Electronic Engineering,Xi&#x2019;an,China"}]},{"given":"Wen","family":"Lu","sequence":"additional","affiliation":[{"name":"Xidian University,School of Electronic Engineering,Xi&#x2019;an,China"}]},{"given":"Leida","family":"Li","sequence":"additional","affiliation":[{"name":"Xidian University,School of Artificial Intelligence,Xi&#x2019;an,China"}]},{"given":"Xinbo","family":"Gao","sequence":"additional","affiliation":[{"name":"Chongqing University of Posts and Telecommunications,Key Laboratory of Image Cognition,Chongqing,China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"2017","article-title":"Can: Creative adversarial networks generating\u201dart\" by learning about styles and deviating from style norms","volume":"6","author":"Elgammal","year":"2017"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.visinf.2024.04.003"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/icme57554.2024.10688254"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2696576"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6247954"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46448-0_40"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01924"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-77411-0_15"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICMEW59549.2023.00082"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3319020"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-99-9119-8_5"},{"key":"ref12","article-title":"Investigating the influence of prompt-specific shortcuts in ai generated text 
detection","author":"Park","year":"2024"},{"key":"ref13","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International Conference on Machine Learning","author":"Radford"},{"issue":"2","key":"ref14","first-page":"3","article-title":"Hierarchical text-conditional image generation with clip latents","volume":"1","author":"Ramesh","year":"2022"},{"key":"ref15","article-title":"Midjourney","author":"Holz","year":"2024"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref17","first-page":"36 479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume":"35","author":"Saharia","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref18","article-title":"Large scale gan training for high fidelity natural image synthesis","author":"Brock","year":"2018"},{"key":"ref19","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"International Conference on Machine Learning","author":"Ramesh"},{"issue":"13","key":"ref20","article-title":"Methodology for the subjective assessment of the quality of television pictures","volume":"500","author":"Series","year":"2012","journal-title":"Recommendation ITU-R BT"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2831899"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00510"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/132"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2024.128151"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TBC.2023.3342696"},{"key":"ref26","first-page":"36 652","article-title":"Pick-a-pic: An open dataset of user preferences for text-to-image generation","volume":"36","author":"Kirstain","year":"2023","journal-title":"Advances in Neural Information Processing 
Systems"},{"key":"ref27","article-title":"Imagereward: Learning and evaluating human preferences for text-to-image generation","volume":"36","author":"Xu","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/iccv51070.2023.00200"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.595"}],"event":{"name":"ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Hyderabad, India","start":{"date-parts":[[2025,4,6]]},"end":{"date-parts":[[2025,4,11]]}},"container-title":["ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10887540\/10887541\/10890774.pdf?arnumber=10890774","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T05:22:16Z","timestamp":1774416136000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10890774\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,6]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/icassp49660.2025.10890774","relation":{},"subject":[],"published":{"date-parts":[[2025,4,6]]}}}