{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T23:29:05Z","timestamp":1776122945068,"version":"3.50.1"},"reference-count":61,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"1","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62172398"],"award-info":[{"award-number":["62172398"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Visual. Comput. Graphics"],"published-print":{"date-parts":[[2024,1]]},"DOI":"10.1109\/tvcg.2023.3326913","type":"journal-article","created":{"date-parts":[[2023,10,25]],"date-time":"2023-10-25T13:53:15Z","timestamp":1698241995000},"page":"284-294","source":"Crossref","is-referenced-by-count":36,"title":["Let the Chart Spark: Embedding Semantic Context into Chart with Text-to-Image Generative Model"],"prefix":"10.1109","volume":"30","author":[{"given":"Shishi","family":"Xiao","sequence":"first","affiliation":[{"name":"Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"}]},{"given":"Suizi","family":"Huang","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"}]},{"given":"Yue","family":"Lin","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"}]},{"given":"Yilin","family":"Ye","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"}]},{"given":"Wei","family":"Zeng","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology (Guangzhou), Guangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1145\/1753326.1753716"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2012.197"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2015.2467732"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2013.234"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3025453.3025512"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2021.3092680"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1177\/1473871617724212"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1145\/3592116"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2019.2934810"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1111\/cgf.14004"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2019.2934785"},{"key":"ref13","first-page":"8780","article-title":"Diffusion models beat gans on image synthesis","volume-title":"Proc. NIPS","volume":"34","author":"Dhariwal","year":"2021"},{"key":"ref14","first-page":"1","article-title":"The chartjunk debate","author":"Few","year":"2011","journal-title":"Visual Business Intelligence Newsletter"},{"key":"ref15","first-page":"5207","article-title":"CLIPDraw: Exploring text-to-drawing synthesis through language-image encoders","volume-title":"Proc. NIPS","volume":"35","author":"Frans","year":"2022"},{"key":"ref16","article-title":"An image is worth one word: Personalizing text-to-image generation using textual inversion","volume-title":"Proc. ICML","author":"Gal","year":"2023"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3528223.3530164"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/2702123.2702275"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/2702123.2702545"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.4324\/9781315580951-16"},{"key":"ref21","article-title":"Prompt-to-prompt image editing with cross attention control","volume-title":"Proc. ICML","author":"Hertz","year":"2023"},{"key":"ref22","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. NIPS","volume":"33","author":"Ho","year":"2020"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1201\/9781003222361"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2011.175"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3592123"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00453"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2016.2598620"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1145\/3313831.3376443"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2021.3074582"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3491102.3502048"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02156"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/3313831.3376263"},{"key":"ref33","article-title":"Efficient estimation of word representations in vector space","author":"Mikolov","year":"2013","journal-title":"arXiv preprint arXiv"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1177\/1473871611415996"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2012.221"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2020.3023013"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2020.3030448"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19797-0_3"},{"key":"ref39","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. ICML","author":"Radford","year":"2021"},{"key":"ref40","article-title":"Hierarchical text-conditional image generation with CLIP latents","author":"Ramesh","year":"2022","journal-title":"arXiv preprint arXiv"},{"key":"ref41","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. ICML","author":"Ramesh","year":"2021"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-05936-0_1"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.02155"},{"key":"ref45","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume-title":"Proc. NIPS","author":"Saharia","year":"2022"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1111\/cgf.14841"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02157"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2022.3209486"},{"key":"ref49","volume-title":"Denoising diffusion implicit models","author":"Song","year":"2022"},{"key":"ref50","first-page":"16857","article-title":"MPNet: Masked and permuted pre-training for language understanding","volume-title":"Proc. NIPS","volume":"33","author":"Song","year":"2020"},{"key":"ref51","volume-title":"The Visual Display of Quantitative Information","author":"Tufte","year":"2001"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2022.3209357"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2019.2934398"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1145\/3173574.3173909"},{"key":"ref55","article-title":"viz2viz: Prompt-driven stylized visualization generation using a diffusion model","author":"Wu","year":"2023","journal-title":"arXiv preprint arXiv"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1145\/3173574.3173797"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1111\/cgf.14832"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/tvcg.2022.3209447"},{"key":"ref59","article-title":"Scaling autoregressive models for content-rich text-to-image generation","author":"Yu","year":"2022","journal-title":"arXiv preprint arXiv"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1145\/3313831.3376172"},{"key":"ref61","article-title":"Adding conditional control to text-to-image diffusion models","author":"Zhang","year":"2023","journal-title":"arXiv preprint arXiv"}],"container-title":["IEEE Transactions on Visualization and Computer Graphics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/2945\/10373160\/10296520.pdf?arnumber=10296520","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,27]],"date-time":"2026-01-27T05:47:29Z","timestamp":1769492849000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10296520\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,1]]},"references-count":61,"journal-issue":{"issue":"1"},"URL":"https:\/\/doi.org\/10.1109\/tvcg.2023.3326913","relation":{},"ISSN":["1077-2626","1941-0506","2160-9306"],"issn-type":[{"value":"1077-2626","type":"print"},{"value":"1941-0506","type":"electronic"},{"value":"2160-9306","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,1]]}}}