{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,2]],"date-time":"2026-01-02T07:38:08Z","timestamp":1767339488202,"version":"3.40.4"},"reference-count":71,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62441231","62472065","62293542","U23B2010"],"award-info":[{"award-number":["62441231","62472065","62293542","U23B2010"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Liao Ning Province Science and Technology Plan","award":["2023JH26","10200016"],"award-info":[{"award-number":["2023JH26","10200016"]}]},{"name":"Dalian City Science and Technology Innovation","award":["2023JJ11CG001"],"award-info":[{"award-number":["2023JJ11CG001"]}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["DUT22ZD210"],"award-info":[{"award-number":["DUT22ZD210"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. on Image Process."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tip.2025.3558668","type":"journal-article","created":{"date-parts":[[2025,4,14]],"date-time":"2025-04-14T17:41:39Z","timestamp":1744652499000},"page":"2544-2559","source":"Crossref","is-referenced-by-count":3,"title":["CharacterFactory: Sampling Consistent Characters With GANs for Diffusion Models"],"prefix":"10.1109","volume":"34","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6908-5485","authenticated-orcid":false,"given":"Qinghe","family":"Wang","sequence":"first","affiliation":[{"name":"School of Information and Communication Engineering, Dalian University of Technology, Dalian, China"}]},{"given":"Baolu","family":"Li","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Dalian University of Technology, Dalian, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7202-6865","authenticated-orcid":false,"given":"Xiaomin","family":"Li","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Dalian University of Technology, Dalian, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0316-5404","authenticated-orcid":false,"given":"Bing","family":"Cao","sequence":"additional","affiliation":[{"name":"College of Intelligence and Computing, Tianjin University, Tianjin, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0814-9876","authenticated-orcid":false,"given":"Liqian","family":"Ma","sequence":"additional","affiliation":[{"name":"ZMO.AI Inc., Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6668-9758","authenticated-orcid":false,"given":"Huchuan","family":"Lu","sequence":"additional","affiliation":[{"name":"School of Information and Communication Engineering, Dalian University of Technology, Dalian, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3168-3505","authenticated-orcid":false,"given":"Xu","family":"Jia","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Dalian University of Technology, Dalian, China"}]}],"member":"263","reference":[{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref2","article-title":"PixArt-\u03b1: Fast training of diffusion transformer for photorealistic text-to-image synthesis","author":"Chen","year":"2023","journal-title":"arXiv:2310.00426"},{"key":"ref3","article-title":"Text-to-image diffusion models in generative AI: A survey","author":"Zhang","year":"2023","journal-title":"arXiv:2303.07909"},{"doi-asserted-by":"publisher","key":"ref4","DOI":"10.1109\/CVPR.2019.00453"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.1007\/978-3-031-19787-1_1"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1109\/TIP.2018.2869695"},{"doi-asserted-by":"publisher","key":"ref7","DOI":"10.1007\/978-3-031-72983-6_15"},{"doi-asserted-by":"publisher","key":"ref8","DOI":"10.1109\/TIP.2023.3340522"},{"key":"ref9","article-title":"InstantStyle-plus: Style transfer with content-preserving in text-to-image generation","author":"Wang","year":"2024","journal-title":"arXiv:2407.00788"},{"doi-asserted-by":"publisher","key":"ref10","DOI":"10.1109\/TIP.2022.3229614"},{"key":"ref11","article-title":"PhotoMaker: Customizing realistic human photos via stacked ID embedding","author":"Li","year":"2023","journal-title":"arXiv:2312.04461"},{"key":"ref12","article-title":"StableIdentity: Inserting anybody into anywhere at first sight","author":"Wang","year":"2024","journal-title":"arXiv:2401.15975"},{"doi-asserted-by":"publisher","key":"ref13","DOI":"10.1609\/aaai.v38i2.27891"},{"key":"ref14","article-title":"InstantID: Zero-shot identity-preserving generation in seconds","author":"Wang","year":"2024","journal-title":"arXiv:2401.07519"},{"key":"ref15","article-title":"IP-adapter: Text compatible image prompt adapter for text-to-image diffusion models","author":"Ye","year":"2023","journal-title":"arXiv:2308.06721"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1109\/ICCV51070.2023.01461"},{"key":"ref17","article-title":"An image is worth one word: Personalizing text-to-image generation using textual inversion","author":"Gal","year":"2022","journal-title":"arXiv:2208.01618"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"ref19","article-title":"The chosen one: Consistent characters in text-to-image diffusion models","author":"Avrahami","year":"2023","journal-title":"arXiv:2311.10093"},{"doi-asserted-by":"publisher","key":"ref20","DOI":"10.1145\/3658157"},{"key":"ref21","article-title":"StoryDiffusion: Consistent self-attention for long-range image and video generation","author":"Zhou","year":"2024","journal-title":"arXiv:2405.01434"},{"key":"ref22","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref23","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Ho"},{"key":"ref24","article-title":"Denoising diffusion implicit models","author":"Song","year":"2020","journal-title":"arXiv:2010.02502"},{"key":"ref25","article-title":"GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models","author":"Nichol","year":"2021","journal-title":"arXiv:2112.10741"},{"key":"ref26","article-title":"Hierarchical text-conditional image generation with CLIP latents","author":"Ramesh","year":"2022","journal-title":"arXiv:2204.06125"},{"key":"ref27","article-title":"ShoeModel: Learning to wear on the user-specified shoes via diffusion model","author":"Chen","year":"2024","journal-title":"arXiv:2404.04833"},{"doi-asserted-by":"publisher","key":"ref28","DOI":"10.1109\/TIP.2024.3383776"},{"key":"ref29","first-page":"25278","article-title":"LAION-5B: An open large-scale dataset for training next generation image-text models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Schuhmann"},{"key":"ref30","article-title":"LAION-400M: Open dataset of CLIP-filtered 400 million image-text pairs","author":"Schuhmann","year":"2021","journal-title":"arXiv:2111.02114"},{"doi-asserted-by":"publisher","key":"ref31","DOI":"10.1109\/ICCV51070.2023.00387"},{"doi-asserted-by":"publisher","key":"ref32","DOI":"10.1109\/ICCV51070.2023.00355"},{"doi-asserted-by":"publisher","key":"ref33","DOI":"10.1145\/3658150"},{"key":"ref34","article-title":"Seek for incantations: Towards accurate text-to-image diffusion synthesis through prompt engineering","author":"Yu","year":"2024","journal-title":"arXiv:2401.06345"},{"doi-asserted-by":"publisher","key":"ref35","DOI":"10.1109\/TVCG.2023.3327168"},{"doi-asserted-by":"publisher","key":"ref36","DOI":"10.1609\/aaai.v38i5.28226"},{"doi-asserted-by":"publisher","key":"ref37","DOI":"10.1109\/CVPR52729.2023.02156"},{"key":"ref38","article-title":"Composer: Creative and controllable image synthesis with composable conditions","author":"Huang","year":"2023","journal-title":"arXiv:2302.09778"},{"key":"ref39","article-title":"Uni-ControlNet: All-in-one control to text-to-image diffusion models","author":"Zhao","year":"2023","journal-title":"arXiv:2305.16322"},{"doi-asserted-by":"publisher","key":"ref40","DOI":"10.1109\/CVPR52729.2023.00192"},{"key":"ref41","article-title":"Inserting anybody in diffusion models via celeb basis","author":"Yuan","year":"2023","journal-title":"arXiv:2306.00926"},{"key":"ref42","article-title":"Cross initialization for personalized text-to-image generation","author":"Pang","year":"2023","journal-title":"arXiv:2312.15905"},{"doi-asserted-by":"publisher","key":"ref43","DOI":"10.1609\/aaai.v39i8.32904"},{"doi-asserted-by":"publisher","key":"ref44","DOI":"10.5555\/2969033.2969125"},{"doi-asserted-by":"publisher","key":"ref45","DOI":"10.1109\/TIP.2023.3326675"},{"doi-asserted-by":"publisher","key":"ref46","DOI":"10.1109\/TIP.2024.3385295"},{"doi-asserted-by":"publisher","key":"ref47","DOI":"10.1109\/TIP.2022.3226413"},{"doi-asserted-by":"publisher","key":"ref48","DOI":"10.1109\/TIP.2023.3326680"},{"doi-asserted-by":"publisher","key":"ref49","DOI":"10.1109\/TIP.2022.3214336"},{"key":"ref50","first-page":"8780","article-title":"Diffusion models beat GANs on image synthesis","volume-title":"Proc. NIPS","volume":"34","author":"Dhariwal"},{"key":"ref51","article-title":"UFOGen: You forward once large scale text-to-image generation via diffusion GANs","author":"Xu","year":"2023","journal-title":"arXiv:2311.09257"},{"key":"ref52","article-title":"Tackling the generative learning trilemma with denoising diffusion GANs","author":"Xiao","year":"2021","journal-title":"arXiv:2112.07804"},{"key":"ref53","article-title":"Diffusion-GAN: Training GANs with diffusion","author":"Wang","year":"2022","journal-title":"arXiv:2206.02262"},{"doi-asserted-by":"publisher","key":"ref54","DOI":"10.1109\/CVPR46437.2021.01268"},{"key":"ref55","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Proc. Adv. Neural Inf. Process. Syst. (NIPS)","author":"Ouyang"},{"doi-asserted-by":"publisher","key":"ref56","DOI":"10.1109\/CVPR.2017.632"},{"doi-asserted-by":"publisher","key":"ref57","DOI":"10.1109\/CVPR.2018.00068"},{"doi-asserted-by":"publisher","key":"ref58","DOI":"10.1109\/TNNLS.2023.3236486"},{"key":"ref59","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref60","article-title":"Classifier-free diffusion guidance","author":"Ho","year":"2022","journal-title":"arXiv:2207.12598"},{"doi-asserted-by":"publisher","key":"ref61","DOI":"10.1109\/CVPR.2019.00482"},{"key":"ref62","article-title":"SingleInsert: Inserting new concepts from a single image into text-to-image models for flexible editing","author":"Wu","year":"2023","journal-title":"arXiv:2310.08094"},{"key":"ref63","article-title":"Are GANs created equal? A large-scale study","author":"Lucic","year":"2017","journal-title":"arXiv:1711.10337"},{"key":"ref64","article-title":"Generative adversarial interpolative autoencoding: Adversarial training on latent space interpolations encourage convex latent distributions","author":"Sainburg","year":"2018","journal-title":"arXiv:1807.06650"},{"doi-asserted-by":"publisher","key":"ref65","DOI":"10.1109\/TIP.2023.3323452"},{"doi-asserted-by":"publisher","key":"ref66","DOI":"10.3390\/electronics12010218"},{"doi-asserted-by":"publisher","key":"ref67","DOI":"10.1109\/CVPR.2017.143"},{"key":"ref68","article-title":"ModelScope text-to-video technical report","volume-title":"arXiv:2308.06571","author":"Wang","year":"2023"},{"key":"ref69","article-title":"LucidDreamer: Towards high-fidelity text-to-3D generation via interval score matching","author":"Liang","year":"2023","journal-title":"arXiv:2311.11284"},{"doi-asserted-by":"publisher","key":"ref70","DOI":"10.48550\/arXiv.1312.6114"},{"key":"ref71","article-title":"Flow straight and fast: Learning to generate and transfer data with rectified flow","author":"Liu","year":"2022","journal-title":"arXiv:2209.03003"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/83\/10795784\/10964554.pdf?arnumber=10964554","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,5]],"date-time":"2025-05-05T18:00:02Z","timestamp":1746468002000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10964554\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":71,"URL":"https:\/\/doi.org\/10.1109\/tip.2025.3558668","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"type":"print","value":"1057-7149"},{"type":"electronic","value":"1941-0042"}],"subject":[],"published":{"date-parts":[[2025]]}}}