{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,19]],"date-time":"2026-03-19T17:50:07Z","timestamp":1773942607592,"version":"3.50.1"},"reference-count":37,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)",
"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],
"funder":[{"DOI":"10.13039\/501100003708","name":"Research and Development Project, Building a Data\/AI-Based Problem-Solving System, at the Korea Institute of Science and Technology Information (KISTI), South Korea","doi-asserted-by":"publisher","award":["K-23-L04-C05-S01"],"award-info":[{"award-number":["K-23-L04-C05-S01"]}],"id":[{"id":"10.13039\/501100003708","id-type":"DOI","asserted-by":"publisher"}]}],
"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2023.3348778","type":"journal-article","created":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T19:51:03Z","timestamp":1704138663000},"page":"2660-2673","source":"Crossref","is-referenced-by-count":10,"title":["Optimizing Prompts Using In-Context Few-Shot Learning for Text-to-Image Generative Models"],"prefix":"10.1109","volume":"12",
"author":[{"ORCID":"https:\/\/orcid.org\/0009-0004-9419-448X","authenticated-orcid":false,"given":"Seunghun","family":"Lee","sequence":"first","affiliation":[{"name":"School of Electronic and Electrical Engineering, Kyungpook National University, Daegu, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-6665-3739","authenticated-orcid":false,"given":"Jihoon","family":"Lee","sequence":"additional","affiliation":[{"name":"School of Electronic and Electrical Engineering, Kyungpook National University, Daegu, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-6620-6641","authenticated-orcid":false,"given":"Chan Ho","family":"Bae","sequence":"additional","affiliation":[{"name":"School of Electronic and Electrical Engineering, Kyungpook National University, Daegu, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4821-3390","authenticated-orcid":false,"given":"Myung-Seok","family":"Choi","sequence":"additional","affiliation":[{"name":"AI Data Research Center, Korea Institute of Science and Technology Information (KISTI), Daejeon, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5142-6106","authenticated-orcid":false,"given":"Ryong","family":"Lee","sequence":"additional","affiliation":[{"name":"AI Data Research Center, Korea Institute of Science and Technology Information (KISTI), Daejeon, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9487-5649","authenticated-orcid":false,"given":"Sangtae","family":"Ahn","sequence":"additional","affiliation":[{"name":"School of Electronic and Electrical Engineering, Kyungpook National University, Daegu, South Korea"}]}],"member":"263",
"reference":[{"key":"ref1","article-title":"Recent advances and trends in multimodal deep learning: A review","author":"Summaira","year":"2021","journal-title":"arXiv:2105.11087"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.3389\/fpls.2023.1117478"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3299314"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3206662"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3310015"},{"key":"ref6","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. Int. Conf. Mach. Learn. (PMLR)","author":"Ramesh"},{"key":"ref7","article-title":"Hierarchical text-conditional image generation with CLIP latents","author":"Ramesh","year":"2022","journal-title":"arXiv:2204.06125"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref9","first-page":"1","article-title":"Towards a rigorous science of interpretable machine learning","author":"Doshi-Velez","year":"2017","journal-title":"arXiv:1702.08608"},{"key":"ref10","first-page":"1","article-title":"Hard prompts made easy: Gradient-based discrete optimization for prompt tuning and discovery","author":"Wen","year":"2023","journal-title":"arXiv:2302.03668"},{"key":"ref11","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume-title":"Proc. Adv. Neural Inf. Process Syst.","volume":"35","author":"Saharia"},{"key":"ref12","first-page":"8780","article-title":"Diffusion models beat GANs on image synthesis","volume":"11","author":"Dhariwal","year":"2021","journal-title":"Adv. Neural Inf. Process Syst."},{"key":"ref13","article-title":"Exploring the limits of language modeling","author":"Jozefowicz","year":"2016","journal-title":"arXiv:1602.02410"},{"key":"ref14","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv:2010.11929"},{"key":"ref15","article-title":"An image is worth one word: Personalizing text-to-image generation using textual inversion","author":"Gal","year":"2022","journal-title":"arXiv:2208.01618"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3560815"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1145\/3491102.3501825"},{"key":"ref19","article-title":"Learning transferable visual models from natural language supervision","author":"Radford","year":"2021","journal-title":"arXiv:2103.00020"},{"key":"ref20","article-title":"Best prompts for text-to-image models and how to find them","author":"Pavlichenko","year":"2022","journal-title":"arXiv:2209.11711"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.222"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.80"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"ref24","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Brown"},{"key":"ref25","article-title":"GPT understands, too","author":"Liu","year":"2021","journal-title":"arXiv:2103.10385"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-short.94"},{"key":"ref27","article-title":"Optimizing prompts for text-to-image generation","author":"Hao","year":"2022","journal-title":"arXiv:2212.09611"},{"issue":"8","key":"ref28","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"key":"ref29","article-title":"GPT-J-6B: A 6 billion parameter autoregressive language model","author":"Wang","year":"2021"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3617680"},{"key":"ref31","first-page":"1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2019","journal-title":"J. Mach. Learn. Res."},{"key":"ref32","article-title":"Grounding DINO: Marrying DINO with grounded pre-training for open-set object detection","author":"Liu","year":"2023","journal-title":"arXiv:2303.05499"},{"key":"ref33","article-title":"Segment anything","author":"Kirillov","year":"2023","journal-title":"arXiv:2304.02643"},{"key":"ref34","first-page":"3836","article-title":"Adding conditional control to text-to-image diffusion models","volume-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis.","author":"Zhang"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"ref36","first-page":"6627","article-title":"GANs trained by a two time-scale update rule converge to a local Nash equilibrium","volume-title":"Proc. Adv. Neural Inf. Process Syst. (NIPS)","author":"Heusel"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.308"}],
"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/10380310\/10378642.pdf?arnumber=10378642","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,12]],"date-time":"2024-01-12T23:55:12Z","timestamp":1705103712000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10378642\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/access.2023.3348778","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}