{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T15:30:05Z","timestamp":1778081405930,"version":"3.51.4"},"reference-count":42,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["2427478"],"award-info":[{"award-number":["2427478"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100000138","name":"U.S. Department of Education","doi-asserted-by":"publisher","award":["2229873"],"award-info":[{"award-number":["2229873"]}],"id":[{"id":"10.13039\/100000138","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iccv51701.2025.01803","type":"proceedings-article","created":{"date-parts":[[2026,4,29]],"date-time":"2026-04-29T19:45:49Z","timestamp":1777491949000},"page":"19396-19405","source":"Crossref","is-referenced-by-count":1,"title":["T2I-Copilot: A Training-Free Multi-Agent Text-to-Image System for Enhanced Prompt Interpretation and Interactive Generation"],"prefix":"10.1109","author":[{"given":"Chieh-Yun","family":"Chen","sequence":"first","affiliation":[{"name":"SHI Labs @ Georgia Tech"}]},{"given":"Min","family":"Shi","sequence":"additional","affiliation":[{"name":"SHI Labs @ Georgia Tech"}]},{"given":"Gong","family":"Zhang","sequence":"additional","affiliation":[{"name":"SHI Labs @ Georgia Tech"}]},{"given":"Humphrey","family":"Shi","sequence":"additional","affiliation":[{"name":"SHI Labs @ Georgia Tech"}]}],"member":"263","reference":[{"key":"ref1","volume-title":"Stable diffusion","year":"2024"},{"key":"ref2","article-title":"Imagen 3","author":"Baldridge","year":"2024","journal-title":"arXiv preprint"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3592116"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1847"},{"key":"ref5","article-title":"Januspro: Unified multimodal understanding and generation with data and model scaling","author":"Chen","year":"2025","journal-title":"arXiv preprint"},{"key":"ref6","article-title":"Region-aware text-to-image generation via hard binding and soft refinement","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.52202\/075280-2923"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.52202\/079017-4372"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.52202\/075280-1594"},{"key":"ref11","volume-title":"FLUX","year":"2024"},{"key":"ref12","article-title":"GenAI-bench: A holistic benchmark for compositional text-to-visual generation","volume-title":"Synthetic Data for Computer Vision Workshop @ CVPR","author":"Li","year":"2024"},{"key":"ref13","article-title":"Playground v2.5: Three insights towards enhancing aesthetic quality in text-to-image generation","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"key":"ref14","article-title":"Hunyuan-DiT: A powerful multi-resolution diffusion transformer with fine-grained chinese understanding","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"key":"ref15","article-title":"Llmgrounded diffusion: Enhancing prompt understanding of text-to-image diffusion models with large language models","author":"Lian","year":"2024","journal-title":"Transactions on Machine Learning Research (TMLR)"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72673-6_20"},{"key":"ref17","article-title":"Improving text-to-image consistency via automatic prompt optimization","author":"Ma\u00f1as","year":"2024","journal-title":"Transactions on Machine Learning Research (TMLR)"},{"key":"ref18","volume-title":"Midjourney v6.1","year":"2024"},{"key":"ref19","article-title":"Personalized and sequential text-to-image generation","author":"Nabati","year":"2024","journal-title":"arXiv preprint"},{"key":"ref20","volume-title":"DALL\u2022E 3","year":"2024"},{"key":"ref21","volume-title":"GPT-4o","year":"2024"},{"key":"ref22","article-title":"SDXL: Improving latent diffusion models for high-resolution image synthesis","volume-title":"Proceedings of the International Conference on Learning Representations (ICLR)","author":"Podell","year":"2024"},{"key":"ref23","article-title":"DiffusionGPT: Llm-driven text-to-image generation system","author":"Qin","year":"2024","journal-title":"arXiv preprint"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.52202\/075280-0157"},{"key":"ref25","volume-title":"Recraft v3","year":"2024"},{"key":"ref26","article-title":"Grounded sam: Assembling open-world models for diverse visual tasks","author":"Ren","year":"2024","journal-title":"arXiv preprint"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.52202\/068431-2643"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-emnlp.320"},{"key":"ref29","article-title":"Hugginggpt: Solving AI tasks with chatgpt and its friends in huggingface","author":"Shen","year":"2023","journal-title":"Advances in Neural Information Processing Systems (NeurIPS)"},{"key":"ref30","volume-title":"Kolors: Effective training of diffusion model for photorealistic text-to-image synthesis","year":"2024"},{"key":"ref31","volume-title":"LangGraph","year":"2024"},{"key":"ref32","volume-title":"Lumina-image 2.0: A unified and efficient image generative model","year":"2025"},{"key":"ref33","volume-title":"Omost github","year":"2024"},{"key":"ref34","article-title":"Genartist: Multimodal LLM as an agent for unified image generation and editing","author":"Wang","year":"2024","journal-title":"Advances in Neural Information Processing Systems (NeurIPS)"},{"key":"ref35","article-title":"Visual chatgpt: Talking, drawing and editing with visual foundation models","author":"Wu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00605"},{"key":"ref37","article-title":"Human preference score v 2: A solid benchmark for evaluating human preferences of text-to-image synthesis","author":"Wu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref38","article-title":"Imagereward: Learning and evaluating human preferences for text-to-image generation","author":"Xu","year":"2023","journal-title":"Advances in Neural Information Processing Systems (NeurIPS)"},{"key":"ref39","article-title":"React: Synergizing reasoning and acting in language models","volume-title":"Proceedings of the International Conference on Learning Representations (ICLR)","author":"Yao","year":"2023"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1677"},{"key":"ref41","article-title":"Golden noise for diffusion models: A learning framework","author":"Zhou","year":"2024","journal-title":"arXiv preprint"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73636-0_12"}],"event":{"name":"2025 IEEE\/CVF International Conference on Computer Vision (ICCV)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,10,19]]},"end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/CVF International Conference on Computer Vision (ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11443115\/11443287\/11445051.pdf?arnumber=11445051","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T05:16:06Z","timestamp":1777612566000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11445051\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/iccv51701.2025.01803","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}