{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T20:22:58Z","timestamp":1740169378102,"version":"3.37.3"},"reference-count":56,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100003382","name":"Core Research for Evolutional Science and Technology, Japan","doi-asserted-by":"publisher","award":["JPMJCR19F2"],"award-info":[{"award-number":["JPMJCR19F2"]}],"id":[{"id":"10.13039\/501100003382","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3486055","type":"journal-article","created":{"date-parts":[[2024,10,24]],"date-time":"2024-10-24T17:28:48Z","timestamp":1729790928000},"page":"162191-162203","source":"Crossref","is-referenced-by-count":0,"title":["Example-Based Conditioning for Text-to-Image Generative Models"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-6357-5640","authenticated-orcid":false,"given":"Atsushi","family":"Takada","sequence":"first","affiliation":[{"name":"Institute of Industrial Science, The University of Tokyo, Tokyo, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5547-3831","authenticated-orcid":false,"given":"Wataru","family":"Kawabe","sequence":"additional","affiliation":[{"name":"Institute of Industrial Science, The University of Tokyo, Tokyo, Japan"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4206-710X","authenticated-orcid":false,"given":"Yusuke","family":"Sugano","sequence":"additional","affiliation":[{"name":"Institute of Industrial Science, The University of Tokyo, Tokyo, Japan"}]}],"member":"263","reference":[{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.5555\/2969033.2969125"},{"key":"ref2","article-title":"Unsupervised representation learning with deep convolutional generative adversarial networks","author":"Radford","year":"2015","journal-title":"arXiv:1511.06434"},{"key":"ref3","article-title":"Conditional generative adversarial nets","author":"Mirza","year":"2014","journal-title":"arXiv:1411.1784"},{"key":"ref4","first-page":"2180","article-title":"InfoGAN: Interpretable representation learning by information maximizing generative adversarial nets","volume-title":"Proc. 30th Int. Conf. Neural Inf. Process. Syst.","volume":"29","author":"Chen"},{"volume-title":"Proc. Int. Conf. Learn. Represent. (ICLR)","author":"Karras","article-title":"Progressive growing of GANs for improved quality, stability, and variation","key":"ref5"},{"key":"ref6","article-title":"Improving the improved training of Wasserstein GANs: A consistency term and its dual effect","author":"Wei","year":"2018","journal-title":"arXiv:1803.01541"},{"key":"ref7","first-page":"2642","article-title":"Conditional image synthesis with auxiliary classifier GANs","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Odena"},{"key":"ref8","article-title":"ContraGAN: Contrastive learning for conditional image generation","author":"Kang","year":"2020","journal-title":"arXiv:2006.12681"},{"key":"ref9","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. Int. Conf. Mach. Learn. 
(ICML)","author":"Ramesh"},{"key":"ref10","article-title":"Hierarchical text-conditional image generation with CLIP latents","author":"Ramesh","year":"2022","journal-title":"arXiv:2204.06125"},{"key":"ref11","article-title":"ERNIE-ViLG: Unified generative pre-training for bidirectional vision-language generation","author":"Zhang","year":"2021","journal-title":"arXiv:2112.15283"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref13","article-title":"Large language models are human-level prompt engineers","author":"Zhou","year":"2022","journal-title":"arXiv:2211.01910"},{"doi-asserted-by":"publisher","key":"ref14","DOI":"10.1145\/3301275.3302289"},{"key":"ref15","first-page":"12966","article-title":"Interactive label cleaning with example-based explanations","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Teso"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1609\/aimag.v35i4.2513"},{"issue":"8","key":"ref17","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"Radford","year":"2019","journal-title":"OpenAI Blog"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.1109\/CVPR.2017.632"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1109\/ICCV.2017.244"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1109\/CVPR.2018.00917"},{"key":"ref21","first-page":"700","article-title":"Unsupervised image-to-image translation networks","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst. (NIPS)","volume":"30","author":"Liu"},{"doi-asserted-by":"publisher","key":"ref22","DOI":"10.1007\/978-3-030-01219-9_11"},{"doi-asserted-by":"publisher","key":"ref23","DOI":"10.1007\/978-3-030-01249-6_50"},{"doi-asserted-by":"publisher","key":"ref24","DOI":"10.1109\/CVPR.2018.00916"},{"doi-asserted-by":"publisher","key":"ref25","DOI":"10.1007\/978-3-030-01246-5_3"},{"key":"ref26","first-page":"1060","article-title":"Generative adversarial text to image synthesis","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Reed"},{"doi-asserted-by":"publisher","key":"ref27","DOI":"10.1109\/ICCV.2017.629"},{"doi-asserted-by":"publisher","key":"ref28","DOI":"10.1109\/CVPR42600.2020.00790"},{"doi-asserted-by":"publisher","key":"ref29","DOI":"10.1109\/ICCVW54120.2021.00349"},{"doi-asserted-by":"publisher","key":"ref30","DOI":"10.1109\/CVPR.2018.00143"},{"key":"ref31","first-page":"217","article-title":"Learning what and where to draw","volume-title":"Proc. Conf. Neural Inf. Process. Syst. (NIPS)","author":"Reed"},{"key":"ref32","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. NIPS","volume":"33","author":"Ho"},{"key":"ref33","first-page":"2256","article-title":"Deep unsupervised learning using nonequilibrium thermodynamics","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Sohl-Dickstein"},{"volume-title":"Proc. Int. Conf. Learn. Represent. (ICLR)","author":"Song","article-title":"Denoising diffusion implicit models","key":"ref34"},{"key":"ref35","first-page":"8162","article-title":"Improved denoising diffusion probabilistic models","volume-title":"Proc. 38th Int. Conf. Mach. 
Learn.","author":"Nichol"},{"doi-asserted-by":"publisher","key":"ref36","DOI":"10.1007\/978-3-319-46454-1_36"},{"key":"ref37","article-title":"Interactive image generation using scene graphs","author":"Mittal","year":"2019","journal-title":"arXiv:1905.03743"},{"doi-asserted-by":"publisher","key":"ref38","DOI":"10.1145\/3491102.3502141"},{"doi-asserted-by":"publisher","key":"ref39","DOI":"10.1145\/3411764.3445714"},{"doi-asserted-by":"publisher","key":"ref40","DOI":"10.1145\/3526113.3545638"},{"doi-asserted-by":"publisher","key":"ref41","DOI":"10.1145\/3185517"},{"volume-title":"Proc. Int. Conf. Intell. User Interfaces","first-page":"39","article-title":"Interactive machine learning","key":"ref42"},{"doi-asserted-by":"publisher","key":"ref43","DOI":"10.1006\/ijhc.2001.0499"},{"doi-asserted-by":"publisher","key":"ref44","DOI":"10.1145\/3180308.3180337"},{"doi-asserted-by":"publisher","key":"ref45","DOI":"10.1145\/3377325.3377483"},{"key":"ref46","article-title":"DeepVA: Bridging cognition and computation through semantic interaction and deep learning","author":"Bian","year":"2020","journal-title":"arXiv:2007.15800"},{"doi-asserted-by":"publisher","key":"ref47","DOI":"10.1145\/3397481.3450670"},{"doi-asserted-by":"publisher","key":"ref48","DOI":"10.1145\/3411764.3445165"},{"issue":"11","key":"ref49","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"Van der Maaten","year":"2008","journal-title":"J. Mach. Learn. Res."},{"doi-asserted-by":"publisher","key":"ref50","DOI":"10.1080\/14786440109462720"},{"key":"ref51","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"volume-title":"Proc. 3rd Int. Conf. Learn. Represent.","author":"Jlb","article-title":"Adam: A method for stochastic optimization","key":"ref52"},{"doi-asserted-by":"publisher","key":"ref53","DOI":"10.1109\/ICVGIP.2008.47"},{"year":"2011","author":"Wah","article-title":"The Caltech-UCSD birds-200-2011 dataset","key":"ref54"},{"doi-asserted-by":"publisher","key":"ref55","DOI":"10.1016\/S0166-4115(08)62386-9"},{"key":"ref56","article-title":"Hard prompts made easy: Gradient-based discrete optimization for prompt tuning and discovery","author":"Wen","year":"2023","journal-title":"arXiv:2302.03668"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10380310\/10734215.pdf?arnumber=10734215","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T02:35:59Z","timestamp":1732674959000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10734215\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":56,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3486055","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2024]]}}}