{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T16:08:14Z","timestamp":1772554094495,"version":"3.50.1"},"reference-count":148,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"10","license":[{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,1]],"date-time":"2025-10-01T00:00:00Z","timestamp":1759276800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Visual. Comput. Graphics"],"published-print":{"date-parts":[[2025,10]]},"DOI":"10.1109\/tvcg.2025.3585077","type":"journal-article","created":{"date-parts":[[2025,7,1]],"date-time":"2025-07-01T13:38:24Z","timestamp":1751377104000},"page":"9464-9483","source":"Crossref","is-referenced-by-count":3,"title":["A Survey on Quality Metrics for Text-to-Image Generation"],"prefix":"10.1109","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-8642-2789","authenticated-orcid":false,"given":"Sebastian","family":"Hartwig","sequence":"first","affiliation":[{"name":"Visual Computing Group, Ulm University, Ulm, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5766-7215","authenticated-orcid":false,"given":"Dominik","family":"Engel","sequence":"additional","affiliation":[{"name":"Visual Computing Group, Ulm University, Ulm, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-6524-0715","authenticated-orcid":false,"given":"Leon","family":"Sick","sequence":"additional","affiliation":[{"name":"Visual Computing Group, Ulm University, Ulm, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5898-8152","authenticated-orcid":false,"given":"Hannah","family":"Kniesel","sequence":"additional","affiliation":[{"name":"Visual Computing Group, Ulm University, Ulm, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-9602-3366","authenticated-orcid":false,"given":"Tristan","family":"Payer","sequence":"additional","affiliation":[{"name":"Visual Computing Group, Ulm University, Ulm, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-0472-229X","authenticated-orcid":false,"given":"Poonam","family":"Poonam","sequence":"additional","affiliation":[{"name":"Visual Computing Group, Ulm University, Ulm, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4436-918X","authenticated-orcid":false,"given":"Michael","family":"Gl\u00f6ckler","sequence":"additional","affiliation":[{"name":"Visual Computing Group, Ulm University, Ulm, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3886-8799","authenticated-orcid":false,"given":"Alex","family":"B\u00e4uerle","sequence":"additional","affiliation":[{"name":"Axiom, Walnut Creek, CA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7857-5512","authenticated-orcid":false,"given":"Timo","family":"Ropinski","sequence":"additional","affiliation":[{"name":"Visual Computing Group, Ulm University, Ulm, 
Germany"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/3528223.3530104"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2023.3327168"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2024.3397712"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2024.3365804"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3461353.3461388"},{"key":"ref6","article-title":"Text-to-image diffusion models in generative AI: A survey","author":"Zhang","year":"2023"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3261988"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1111\/cgf.15063"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3613904.3642165"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.23919\/TST.2017.8195348"},{"key":"ref11","first-page":"25278","article-title":"Laion-5B: An open large-scale dataset for training next generation image-text models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Schuhmann"},{"key":"ref12","article-title":"Imagereward: Learning and evaluating human preferences for text-to-image generation","author":"Xu","year":"2023"},{"key":"ref13","article-title":"Pick-a-Pic: An open dataset of user preferences for text-to-image generation","author":"Kirstain","year":"2023"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/iccv51070.2023.00200"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1111\/cgf.14613"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00697"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19818-2_9"},{"key":"ref18","first-page":"2234","article-title":"Improved techniques for training GANs","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Salimans"},{"key":"ref19","first-page":"6629","article-title":"Gans trained by a two time-scale update rule converge to a local nash equilibrium","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Heusel"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20059-5_34"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/WACV57701.2024.00287"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72998-0_18"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00517"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00200"},{"issue":"8","key":"ref25","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI blog"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref27","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref28","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref29","first-page":"19730","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref30","article-title":"When and why vision-language models behave like bags-of-words, and what to do about it?","volume-title":"Proc. 11th Int. Conf. Learn. 
Representations","author":"Yuksekgonul"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"ref32","article-title":"Benchmarking spatial relationships in text-to-image generation","author":"Gokhale","year":"2022"},{"key":"ref33","article-title":"GPT-4 technical report","author":"OpenAI","year":"2023"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20080-9_42"},{"key":"ref35","article-title":"Real-time flying object detection with YOLOv8","author":"Reis","year":"2023"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72989-8_12"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46454-1_24"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00608"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1220"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1654"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.eval4nlp-1.4"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref44","first-page":"35072","article-title":"Mutual information divergence: A unified metric for multimodal generative models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kim"},{"key":"ref45","first-page":"375","article-title":"Benchmark for compositional text-to-image synthesis","volume-title":"Proc. 35th Conf. Neural Inf. Process. Syst. Datasets Benchmarks Track","author":"Park"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.56"},{"key":"ref47","article-title":"Clove: Encoding compositional language in contrastive vision-language models","author":"Castro","year":"2024"},{"key":"ref48","article-title":"Dreamsim: Learning new dimensions of human visual similarity using synthetic data","author":"Fu","year":"2023"},{"key":"ref49","article-title":"Cobra effect in reference-free image captioning metrics","author":"Ma","year":"2024"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00143"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01835"},{"key":"ref52","article-title":"T2i-compbench: A comprehensive benchmark for open-world compositional text-to-image generation","author":"Huang","year":"2023"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2020.3021209"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612706"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01866"},{"key":"ref56","first-page":"1601","article-title":"What you see is what you read? Improving text-image alignment evaluation","volume":"36","author":"Yarom","year":"2023","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72673-6_20"},{"key":"ref58","article-title":"Understanding and evaluating human preferences for AI generated images with instruction tuning","author":"Wang","year":"2024"},{"key":"ref59","article-title":"Divide, evaluate, and refine: Evaluating and improving text-to-image alignment with iterative VQA feedback","author":"Singh","year":"2023"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1145\/3592116"},{"key":"ref61","first-page":"23075","article-title":"LLMScore: Unveiling the power of large language models in text-to-image synthesis evaluation","volume-title":"Proc. 37th Conf. 
Neural Inf. Process. Syst.","author":"Lu"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.663"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1145\/3447548.3467198"},{"key":"ref64","first-page":"4783","article-title":"Demystifying MMD GANs","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Bi\u0144kowski"},{"key":"ref65","first-page":"1895","article-title":"Revisiting classifier two-sample tests","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lopez-Paz"},{"key":"ref66","first-page":"5234","article-title":"Assessing generative models via precision and recall","volume-title":"Proc. Adv. neural Inf. Process. Syst.","author":"Sajjadi"},{"key":"ref67","first-page":"12268","article-title":"Classification accuracy score for conditional generative models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ravuri"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"ref69","first-page":"3927","article-title":"Improved precision and recall metric for assessing generative models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kynk\u00e4\u00e4nniemi"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58621-8_22"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i2.25353"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref73","article-title":"Microsoft coco captions: Data collection and evaluation server","author":"Chen","year":"2015"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00166"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.303"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1514"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00147"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"ref80","first-page":"6616","article-title":"Large-scale adversarial training for vision-and-language representation learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Gan"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00553"},{"key":"ref82","first-page":"5583","article-title":"ViLT: Vision-and-language transformer without convolution or region supervision","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","author":"Kim"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00475"},{"key":"ref84","article-title":"PaLi: A jointly-scaled multilingual language-image model","author":"Chen","year":"2022"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58548-8_29"},{"key":"ref87","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Brown"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.171"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.488"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"issue":"70","key":"ref91","first-page":"1","article-title":"Scaling instruction-finetuned language models","volume":"25","author":"Chung","year":"2022","journal-title":"J. Mach. Learn. 
Res."},{"key":"ref92","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Saharia"},{"key":"ref93","article-title":"Imagen video: High definition video generation with diffusion models","author":"Ho","year":"2022"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1613\/jair.3994"},{"key":"ref95","first-page":"1","article-title":"Bertscore: Evaluating text generation with BERT","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang"},{"key":"ref96","first-page":"13","article-title":"ViLBERT: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Lu"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.3115\/1073083.1073135"},{"key":"ref98","first-page":"74","article-title":"ROUGE: A package for automatic evaluation of summaries","volume-title":"Proc. Text Summarization Branches Out","author":"Lin"},{"key":"ref99","first-page":"65","article-title":"METEOR: An automatic metric for MT evaluation with improved correlation with human judgments","volume-title":"Proc. ACL Workshop Intrinsic Extrinsic Eval. Measures Mach. Transl. Summarization","author":"Banerjee"},{"key":"ref100","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin"},{"key":"ref101","article-title":"A note on the inception score","author":"Barratt","year":"2018"},{"key":"ref102","first-page":"839","article-title":"Mode regularized generative adversarial networks","volume-title":"Proc. 5th Int. Conf. Learn. Representations","author":"Che"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.7551\/mitpress\/7503.003.0069"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref105","first-page":"1143","article-title":"Im2Text: Describing images using 1 million captioned photographs","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ordonez"},{"key":"ref106","first-page":"442","article-title":"Training and evaluating multimodal word embeddings with large-scale web annotated images","volume-title":"Proc. 30th Int. Conf. Neural Inf. Process. Syst.","author":"Mao"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref108","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00904"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00356"},{"key":"ref110","first-page":"139","article-title":"Collecting image annotations using amazon\u2019s mechanical turk","volume-title":"Proc. NAACL HLT 2010 Workshop Creating Speech Lang. Data Amazon\u2019s Mech. Turk","author":"Rashtchian"},{"key":"ref111","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2013.387"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.279"},{"key":"ref113","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00688"},{"key":"ref114","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0987-1"},{"key":"ref115","article-title":"A very preliminary analysis of DALL-E 2","author":"Marcus","year":"2022"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00283"},{"key":"ref117","first-page":"1","article-title":"Training-free structured diffusion guidance for compositional text-to-image synthesis","volume-title":"Proc. 11th Int. Conf. Learn. 
Representations","author":"Feng"},{"key":"ref118","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02157"},{"key":"ref119","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref120","article-title":"Large scale GAN training for high fidelity natural image synthesis","author":"Brock","year":"2018"},{"key":"ref121","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"ref122","article-title":"The PASCAL visual object classes challenge 2008 (VOC2008) results","author":"Everingham","year":"2008"},{"key":"ref123","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-020-01316-z"},{"key":"ref124","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1085"},{"key":"ref125","doi-asserted-by":"publisher","DOI":"10.1145\/3357384.3358001"},{"key":"ref126","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01101"},{"key":"ref127","first-page":"1","article-title":"VL-BERT: Pre-training of generic visual-linguistic representations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Su"},{"key":"ref128","first-page":"1931","article-title":"Unifying vision-and-language tasks via text generation","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","author":"Cho"},{"key":"ref129","doi-asserted-by":"publisher","DOI":"10.1145\/219717.219748"},{"key":"ref130","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ramesh"},{"key":"ref131","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref132","first-page":"69981","article-title":"Holistic evaluation of text-to-image models","volume-title":"Proc. 37th Conf. Neural Inf. Process. Syst. Datasets Benchmarks Track","author":"Lee"},{"key":"ref133","article-title":"Visual entailment task for visually-grounded language learning","author":"Xie","year":"2019"},{"key":"ref134","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01761"},{"key":"ref135","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02521"},{"key":"ref136","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01050"},{"key":"ref137","first-page":"31096","article-title":"Sugarcrepe: Fixing hackable benchmarks for vision-language compositionality","volume-title":"Proc. 37th Conf. Neural Inf. Process. Syst. \u2014 Datasets & Benchmarks Track (NeurIPS)","author":"Hsieh"},{"key":"ref138","first-page":"46433","article-title":"cola: A benchmark for compositional text-to-image retrieval","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ray"},{"key":"ref139","doi-asserted-by":"publisher","DOI":"10.1016\/j.heliyon.2023.e16757"},{"key":"ref140","doi-asserted-by":"publisher","DOI":"10.1145\/3626235"},{"key":"ref141","article-title":"Hypernymy understanding evaluation of text-to-image models via wordnet hierarchy","author":"Baryshnikov","year":"2023"},{"key":"ref142","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01372"},{"key":"ref143","article-title":"Video diffusion models","author":"Ho","year":"2022"},{"key":"ref144","article-title":"Spot! 
    "container-title": ["IEEE Transactions on Visualization and Computer Graphics"],
    "original-title": [],
    "link": [
      {
        "URL": "http://xplorestaging.ieee.org/ielx8/2945/11151252/11062594.pdf?arnumber=11062594",
        "content-type": "unspecified",
        "content-version": "vor",
        "intended-application": "similarity-checking"
      }
    ],
    "deposited": {
      "date-parts": [[2025, 9, 5]],
      "date-time": "2025-09-05T19:17:37Z",
      "timestamp": 1757099857000
    },
    "score": 1,
    "resource": {"primary": {"URL": "https://ieeexplore.ieee.org/document/11062594/"}},
    "subtitle": [],
    "short-title": [],
    "issued": {"date-parts": [[2025, 10]]},
    "references-count": 148,
    "journal-issue": {"issue": "10"},
    "URL": "https://doi.org/10.1109/tvcg.2025.3585077",
    "relation": {},
    "ISSN": ["1077-2626", "1941-0506", "2160-9306"],
    "issn-type": [
      {"value": "1077-2626", "type": "print"},
      {"value": "1941-0506", "type": "electronic"},
      {"value": "2160-9306", "type": "electronic"}
    ],
    "subject": [],
    "published": {"date-parts": [[2025, 10]]}
  }
}
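
A record with this shape can be retrieved directly from the public Crossref REST API at https://api.crossref.org/works/{doi}, which returns the same {"status", "message-type", "message"} envelope shown above. Below is a minimal Python sketch of fetching the record for this DOI and assembling a human-readable citation from the fields present in it ("author", "title", "container-title", "volume", "issue", "page", "published", "DOI"). The helper names fetch_work and format_citation are hypothetical, the requests library is a third-party dependency, and retry logic and Crossref's recommended politeness headers are omitted for brevity.

import requests

DOI = "10.1109/tvcg.2025.3585077"

def fetch_work(doi: str) -> dict:
    """Fetch a Crossref work record and return its 'message' payload.

    Hypothetical helper; assumes the standard Crossref envelope
    {"status": "ok", "message": {...}} shown in the record above.
    """
    resp = requests.get(f"https://api.crossref.org/works/{doi}", timeout=30)
    resp.raise_for_status()
    body = resp.json()
    if body.get("status") != "ok":
        raise RuntimeError(f"unexpected Crossref status: {body.get('status')}")
    return body["message"]

def format_citation(work: dict) -> str:
    """Build a simple citation string from a Crossref work record."""
    authors = ", ".join(
        f"{a.get('given', '')} {a.get('family', '')}".strip()
        for a in work.get("author", [])
    )
    title = work["title"][0]
    journal = work["container-title"][0]
    volume = work.get("volume", "")
    issue = work.get("issue", "")
    pages = work.get("page", "")
    # "published" holds nested date parts, e.g. [[2025, 10]] -> year 2025.
    year = work["published"]["date-parts"][0][0]
    return (
        f'{authors}. "{title}." {journal} '
        f"{volume}({issue}): {pages}, {year}. "
        f"https://doi.org/{work['DOI']}"
    )

if __name__ == "__main__":
    work = fetch_work(DOI)
    print(format_citation(work))

Run against the record above, this would print the survey's citation ending in "IEEE Transactions on Visualization and Computer Graphics 31(10): 9464-9483, 2025. https://doi.org/10.1109/tvcg.2025.3585077". Note that many Crossref fields are optional across records, which is why the sketch uses .get() with defaults everywhere except the fields this particular record is known to contain.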