{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T17:44:27Z","timestamp":1777657467485,"version":"3.51.4"},"publisher-location":"Cham","reference-count":94,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031732225","type":"print"},{"value":"9783031732232","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,8]],"date-time":"2024-11-08T00:00:00Z","timestamp":1731024000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,8]],"date-time":"2024-11-08T00:00:00Z","timestamp":1731024000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73223-2_20","type":"book-chapter","created":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T18:49:36Z","timestamp":1731005376000},"page":"343-362","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["ControlNet-XS: Rethinking the\u00a0Control of\u00a0Text-to-Image Diffusion Models as\u00a0Feedback-Control Systems"],"prefix":"10.1007","author":[{"given":"Denis","family":"Zavadski","sequence":"first","affiliation":[]},{"given":"Johann-Friedrich","family":"Feiden","sequence":"additional","affiliation":[]},{"given":"Carsten","family":"Rother","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,8]]},"reference":[{"key":"20_CR1","unstructured":"Midjourney (2023). https:\/\/www.midjourney.com\/"},{"key":"20_CR2","doi-asserted-by":"crossref","unstructured":"Bar-Tal, O., Ofri-Amar, D., Fridman, R., Kasten, Y., Dekel, T.: Text2live: Text-driven layered image and video editing. In: European Conference on Computer Vision, pp. 707\u2013723 (2022)","DOI":"10.1007\/978-3-031-19784-0_41"},{"key":"20_CR3","unstructured":"Betker, J., et al.: Improving Image Generation with Better Captions (2023)"},{"key":"20_CR4","doi-asserted-by":"crossref","unstructured":"Brooks, T., Holynski, A., Efros, A.A.: Instructpix2pix: learning to follow image editing instructions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition. pp. 18392\u201318402 (2023)","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"20_CR5","doi-asserted-by":"crossref","unstructured":"Caesar, H., Uijlings, J., Ferrari, V.: Coco-stuff: thing and stuff classes in context. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1209\u20131218 (2018)","DOI":"10.1109\/CVPR.2018.00132"},{"key":"20_CR6","doi-asserted-by":"crossref","unstructured":"Cao, Z., Simon, T., Wei, S.E., Sheikh, Y.: Realtime multi-person 2d pose estimation using part affinity fields. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7291\u20137299 (2017)","DOI":"10.1109\/CVPR.2017.143"},{"key":"20_CR7","doi-asserted-by":"crossref","unstructured":"Chen, M., Laina, I., Vedaldi, A.: Training-free layout control with cross-attention guidance. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 
5343\u20135353 (2024)","DOI":"10.1109\/WACV57701.2024.00526"},{"key":"20_CR8","unstructured":"Chen, Z., et al.: Vision transformer adapter for dense predictions. arXiv preprint arXiv:2205.08534 (2022)"},{"key":"20_CR9","unstructured":"Choi, J., Choi, Y., Kim, Y., Kim, J., Yoon, S.: Custom-edit: Text-guided image editing with customized diffusion models. arXiv preprint arXiv:2305.15779 (2023)"},{"key":"20_CR10","unstructured":"Couairon, G., Verbeek, J., Schwenk, H., Cord, M.: Diffedit: diffusion-based semantic image editing with mask guidance. arXiv preprint arXiv:2210.11427 (2022)"},{"key":"20_CR11","doi-asserted-by":"publisher","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In: Burstein, J., Doran, C., Solorio, T. (eds.) Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171\u20134186. Association for Computational Linguistics, Minneapolis, Minnesota (2019). https:\/\/doi.org\/10.18653\/v1\/N19-1423, https:\/\/aclanthology.org\/N19-1423","DOI":"10.18653\/v1\/N19-1423"},{"key":"20_CR12","first-page":"8780","volume":"34","author":"P Dhariwal","year":"2021","unstructured":"Dhariwal, P., Nichol, A.: Diffusion models beat gans on image synthesis. Adv. Neural. Inf. Process. Syst. 34, 8780\u20138794 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"20_CR13","unstructured":"Ding, M., et al.: CogView: mastering text-to-image generation via transformers. In: Ranzato, M., Beygelzimer, A., Dauphin, Y., Liang, P.S., Vaughan, J.W. (eds.) Advances in Neural Information Processing Systems, vol.\u00a034, pp. 19822\u201319835. Curran Associates, Inc. (2021). https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2021\/file\/a4d92e2cd541fca87e4620aba658316d-Paper.pdf"},{"key":"20_CR14","doi-asserted-by":"crossref","unstructured":"Esser, P., Rombach, R., Ommer, B.: Taming transformers for high-resolution image synthesis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12873\u201312883 (2021)","DOI":"10.1109\/CVPR46437.2021.01268"},{"key":"20_CR15","doi-asserted-by":"crossref","unstructured":"Feng, R., et al.: Ccedit: creative and controllable video editing via diffusion models. arXiv preprint arXiv:2309.16496 (2023)","DOI":"10.1109\/CVPR52733.2024.00641"},{"key":"20_CR16","doi-asserted-by":"crossref","unstructured":"Gafni, O., Polyak, A., Ashual, O., Sheynin, S., Parikh, D., Taigman, Y.: Make-A-Scene: Scene-Based Text-to-Image Generation with Human Priors. In: Avidan Shai and Brostow, G., Moustapha, C., Maria, F.G., Tal, H. (eds.) Computer Vision - ECCV 2022, pp. 89\u2013106. Springer Nature Switzerland, Cham (2022)","DOI":"10.1007\/978-3-031-19784-0_6"},{"key":"20_CR17","unstructured":"Gal, R., Alaluf, Y., Atzmon, Y., Patashnik, O., Bermano, A.H., Chechik, G., Cohen-Or, D.: An image is worth one word: personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618 (2022)"},{"key":"20_CR18","unstructured":"Goel, V., et al.: PAIR-Diffusion: object-level image editing with structure-and-appearance paired diffusion models (2023). http:\/\/arxiv.org\/abs\/2303.17546"},{"key":"20_CR19","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: Advances in neural information processing systems, pp. 
2672\u20132680 (2014)"},{"key":"20_CR20","unstructured":"He, Y., Salakhutdinov, R., Kolter, J.Z.: Localized text-to-image generation for free via cross attention control. arXiv preprint arXiv:2306.14636 (2023)"},{"key":"20_CR21","doi-asserted-by":"crossref","unstructured":"Hessel, J., Holtzman, A., Forbes, M., Bras, R.L., Choi, Y.: Clipscore: a reference-free evaluation metric for image captioning. arXiv preprint arXiv:2104.08718 (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"20_CR22","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems 30 (2017)"},{"key":"20_CR23","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. Adv. Neural. Inf. Process. Syst. 33, 6840\u20136851 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"issue":"1","key":"20_CR24","first-page":"2249","volume":"23","author":"J Ho","year":"2022","unstructured":"Ho, J., Saharia, C., Chan, W., Fleet, D.J., Norouzi, M., Salimans, T.: Cascaded diffusion models for high fidelity image generation. J. Mach. Learn. Res. 23(1), 2249\u20132281 (2022)","journal-title":"J. Mach. Learn. Res."},{"key":"20_CR25","unstructured":"Hu, E.J., et al.: LoRA: Low-Rank Adaptation of Large Language Models (2021)"},{"key":"20_CR26","doi-asserted-by":"crossref","unstructured":"Hu, H., et al.: Instruct-Imagen: Image generation with multi-modal instruction. arXiv preprint arXiv:2401.01952 (2024)","DOI":"10.1109\/CVPR52733.2024.00455"},{"key":"20_CR27","unstructured":"Hu, M., Zheng, J., Liu, D., Zheng, C., Wang, C., Tao, D., Cham, T.J.: Cocktail: mixing multi-modality control for text-conditional image generation. In: Thirty-seventh Conference on Neural Information Processing Systems (2023)"},{"key":"20_CR28","unstructured":"Huang, L., Chen, D., Liu, Y., Shen, Y., Zhao, D., Zhou, J.: Composer: creative and controllable image synthesis with composable conditions. arXiv preprint arXiv:2302.09778 (2023)"},{"key":"20_CR29","doi-asserted-by":"crossref","unstructured":"Huang, T., et al.: Dreamcontrol: control-based text-to-3d generation with 3d self-prior. arXiv preprint arXiv:2312.06439 (2023)","DOI":"10.1109\/CVPR52733.2024.00513"},{"key":"20_CR30","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1125\u20131134 (2017)","DOI":"10.1109\/CVPR.2017.632"},{"key":"20_CR31","doi-asserted-by":"crossref","unstructured":"Kang, M., et al.: Scaling up gans for text-to-image synthesis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10124\u201310134 (2023)","DOI":"10.1109\/CVPR52729.2023.00976"},{"key":"20_CR32","unstructured":"Karras, T., Aila, T., Laine, S., Lehtinen, J.: Progressive growing of gans for improved quality, stability, and variation. arXiv preprint arXiv:1710.10196 (2017)"},{"key":"20_CR33","unstructured":"Karras, T., et al.: Alias-free generative adversarial networks. Advances in Neural Information Processing Systems 34 (2021)"},{"key":"20_CR34","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4401\u20134410 (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"20_CR35","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of stylegan. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 8110\u20138119 (2020)","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"20_CR36","first-page":"21696","volume":"34","author":"D Kingma","year":"2021","unstructured":"Kingma, D., Salimans, T., Poole, B., Ho, J.: Variational diffusion models. Adv. Neural. Inf. Process. Syst. 34, 21696\u201321707 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"20_CR37","unstructured":"Li, T., Ku, M., Wei, C., Chen, W.: DreamEdit: Subject-driven Image Editing. arXiv preprint arXiv:2306.12624 (2023)"},{"key":"20_CR38","doi-asserted-by":"crossref","unstructured":"Li, W., Xu, X., Liu, J., Xiao, X.: UNIMO-G: unified image generation through multimodal conditional diffusion. arXiv preprint arXiv:2401.13388 (2024)","DOI":"10.18653\/v1\/2024.acl-long.335"},{"key":"20_CR39","doi-asserted-by":"publisher","unstructured":"Li, Y., Mao, H., Girshick, R., He, K.: Exploring Plain Vision Transformer Backbones for Object Detection. In: Computer Vision - ECCV 2022, pp. 280\u2013296. Springer Nature Switzerland (2022). https:\/\/doi.org\/10.1007\/978-3-031-20077-9_17, http:\/\/dx.doi.org\/10.1007\/978-3-031-20077-9_17","DOI":"10.1007\/978-3-031-20077-9_17"},{"key":"20_CR40","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: Gligen: open-set grounded text-to-image generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 22511\u201322521 (2023)","DOI":"10.1109\/CVPR52729.2023.02156"},{"key":"20_CR41","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"20_CR42","doi-asserted-by":"crossref","unstructured":"Liu, L., Chen, J., Wu, H., Li, G., Li, C., Lin, L.: Cross-modal collaborative representation learning and a large-scale rgbt benchmark for crowd counting. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4823\u20134833 (2021)","DOI":"10.1109\/CVPR46437.2021.00479"},{"key":"20_CR43","first-page":"1","volume":"60","author":"C Lu","year":"2022","unstructured":"Lu, C., Xia, M., Qian, M., Chen, B.: Dual-branch network for cloud and cloud shadow segmentation. IEEE Trans. Geosci. Remote Sens. 60, 1\u201312 (2022)","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"20_CR44","unstructured":"Lukovnikov, D., Fischer, A.: Layout-to-Image Generation with Localized Descriptions using ControlNet with Cross-Attention Control. arXiv preprint arXiv:2402.13404 (2024)"},{"key":"20_CR45","doi-asserted-by":"crossref","unstructured":"Mao1, Y., et al.: UNIPELT: a unified framework for parameter-efficient language model tuning. In: Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics Volume 1: Long Papers, vol.\u00a01, pp. 6253\u20136264. 
ACL (2022)","DOI":"10.18653\/v1\/2022.acl-long.433"},{"key":"20_CR46","doi-asserted-by":"publisher","unstructured":"McCloskey, M., Cohen, N.J.: Catastrophic Interference in Connectionist Networks: The Sequential Learning Problem, vol.\u00a024, pp. 109\u2013165. Academic Press (198). https:\/\/doi.org\/10.1016\/S0079-7421(08)60536-8. https:\/\/www.sciencedirect.com\/science\/article\/pii\/S0079742108605368","DOI":"10.1016\/S0079-7421(08)60536-8"},{"key":"20_CR47","doi-asserted-by":"crossref","unstructured":"Mou, C., et al.: T2i-adapter: Learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv preprint arXiv:2302.08453 (2023)","DOI":"10.1609\/aaai.v38i5.28226"},{"key":"20_CR48","doi-asserted-by":"crossref","unstructured":"Murez, Z., Kolouri, S., Kriegman, D., Ramamoorthi, R., Kim, K.: Image to image translation for domain adaptation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4500\u20134509 (2018)","DOI":"10.1109\/CVPR.2018.00473"},{"key":"20_CR49","unstructured":"Nichol, A., et al.: Glide: towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741 (2021)"},{"key":"20_CR50","unstructured":"Nichol, A.Q., Dhariwal, P.: Improved denoising diffusion probabilistic models. In: International Conference on Machine Learning, pp. 8162\u20138171. PMLR (2021)"},{"key":"20_CR51","unstructured":"Patel, M., Jung, S., Baral, C., Yang, Y.: $$\\lambda $$-ECLIPSE: Multi-Concept Personalized Text-to-Image Diffusion Models by Leveraging CLIP Latent Space. arXiv preprint arXiv:2402.05195 (2024)"},{"key":"20_CR52","doi-asserted-by":"crossref","unstructured":"Pfeiffer, J., Kamath, A., Ruckl, A., Cho, K., Gurevych1, I.: AdapterFusion: non-destructive task composition for transfer learning. In: Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics, pp. 487\u2013503. ACL (2021)","DOI":"10.18653\/v1\/2021.eacl-main.39"},{"key":"20_CR53","unstructured":"Podell, D., et al.: SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis (2023)"},{"key":"20_CR54","unstructured":"Qin, C., et al.: UniControl: a unified diffusion model for controllable visual generation in the wild. arXiv preprint arXiv:2305.11147 (2023)"},{"key":"20_CR55","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763 (2021)"},{"key":"20_CR56","unstructured":"Radford, A., Metz, L., Chintala, S.: Unsupervised representation learning with deep convolutional generative adversarial networks (2016)"},{"issue":"1","key":"20_CR57","first-page":"5485","volume":"21","author":"C Raffel","year":"2020","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res. 21(1), 5485\u20135551 (2020)","journal-title":"J. Mach. Learn. Res."},{"key":"20_CR58","unstructured":"Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical Text-Conditional Image Generation with CLIP Latents (2022)"},{"key":"20_CR59","unstructured":"Ramesh, A., et al.: Zero-Shot Text-to-Image Generation. CoRR abs\/2102.1 (2021). 
https:\/\/arxiv.org\/abs\/2102.12092"},{"issue":"3","key":"20_CR60","doi-asserted-by":"publisher","first-page":"1623","DOI":"10.1109\/TPAMI.2020.3019967","volume":"44","author":"R Ranftl","year":"2020","unstructured":"Ranftl, R., Lasinger, K., Hafner, D., Schindler, K., Koltun, V.: Towards robust monocular depth estimation: mixing datasets for zero-shot cross-dataset transfer. IEEE Trans. Pattern Anal. Mach. Intell. 44(3), 1623\u20131637 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"20_CR61","doi-asserted-by":"crossref","unstructured":"Rebuffi, S.A., Bilen, H., Vedaldi, A.: Efficient Parametrization of Multi-Domain Deep Neural Networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2018)","DOI":"10.1109\/CVPR.2018.00847"},{"key":"20_CR62","unstructured":"Reed, S., Akata, Z., Yan, X., Logeswaran, L., Schiele, B., Lee, H.: Generative adversarial text to image synthesis. In: International Conference on Machine Learning, pp. 1060\u20131069. PMLR (2016)"},{"key":"20_CR63","unstructured":"Reed, S.E., Akata, Z., Mohan, S., Tenka, S., Schiele, B., Lee, H.: Learning what and where to draw. Advances in neural information processing systems 29 (2016)"},{"key":"20_CR64","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (2022). https:\/\/github.com\/CompVis\/latent-diffusion","DOI":"10.1109\/CVPR52688.2022.01042"},{"issue":"3","key":"20_CR65","doi-asserted-by":"publisher","first-page":"651","DOI":"10.1109\/TPAMI.2018.2884462","volume":"42","author":"A Rosenfeld","year":"2020","unstructured":"Rosenfeld, A., Tsotsos, J.K.: Incremental learning through deep adaptation. IEEE Trans. Pattern Anal. Mach. Intell. 42(3), 651\u2013663 (2020). https:\/\/doi.org\/10.1109\/TPAMI.2018.2884462","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"20_CR66","doi-asserted-by":"crossref","unstructured":"Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: Dreambooth: fine tuning text-to-image diffusion models for subject-driven generation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 22500\u201322510 (2023)","DOI":"10.1109\/CVPR52729.2023.02155"},{"key":"20_CR67","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., et al.: Imagenet large scale visual recognition challenge. Int. J. Comput. Vision 115, 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vision"},{"key":"20_CR68","doi-asserted-by":"publisher","unstructured":"Saharia, C., et al.: Palette: image-to-image diffusion models. In: Special Interest Group on Computer Graphics and Interactive Techniques Conference Proceedings. ACM (2022). https:\/\/doi.org\/10.1145\/3528233.3530757, http:\/\/dx.doi.org\/10.1145\/3528233.3530757","DOI":"10.1145\/3528233.3530757"},{"key":"20_CR69","unstructured":"Saharia, C., et al.: Photorealistic Text-to-Image Diffusion Models with Deep Language Understanding. In: Koyejo, S., Mohamed, S., Agarwal, A., Belgrave, D., Cho, K., Oh, A. (eds.) Advances in Neural Information Processing Systems, vol.\u00a035, pp. 36479\u201336494. Curran Associates, Inc. (2022). 
https:\/\/proceedings.neurips.cc\/paper_files\/paper\/2022\/file\/ec795aeadae0b7d230fa35cbaf04c041-Paper-Conference.pdf"},{"key":"20_CR70","unstructured":"Sauer, A., Karras, T., Laine, S., Geiger, A., Aila, T.: Stylegan-t: Unlocking the power of gans for fast large-scale text-to-image synthesis. arXiv preprint arXiv:2301.09515 (2023)"},{"key":"20_CR71","doi-asserted-by":"publisher","unstructured":"Sauer, A., Schwarz, K., Geiger, A.: StyleGAN-XL: Scaling StyleGAN to Large Diverse Datasets. In: ACM SIGGRAPH 2022 Conference Proceedings, pp. 1\u201310 (2022https:\/\/doi.org\/10.1145\/3528233.3530738","DOI":"10.1145\/3528233.3530738"},{"key":"20_CR72","first-page":"25278","volume":"35","author":"C Schuhmann","year":"2022","unstructured":"Schuhmann, C., et al.: Laion-5b: an open large-scale dataset for training next generation image-text models. Adv. Neural. Inf. Process. Syst. 35, 25278\u201325294 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"20_CR73","unstructured":"Sohl-Dickstein, J., Weiss, E., Maheswaranathan, N., Ganguli, S.: Deep unsupervised learning using nonequilibrium thermodynamics. In: International Conference on Machine Learning, pp. 2256\u20132265. PMLR (2015)"},{"key":"20_CR74","unstructured":"Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502 (2020)"},{"key":"20_CR75","unstructured":"Stickland, A.C., Murray, I.: BERT and PALs: projected attention layers for efficient adaptation in multi-task learning. In: Chaudhuri, K., Salakhutdinov, R. (eds.) Proceedings of the 36th International Conference on Machine Learning, vol.\u00a097, pp. 5986\u20135995. PMLR (2019). https:\/\/proceedings.mlr.press\/v97\/stickland19a.html"},{"key":"20_CR76","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"717","DOI":"10.1007\/978-3-030-58595-2_43","volume-title":"Computer Vision \u2013 ECCV 2020","author":"H Tang","year":"2020","unstructured":"Tang, H., Bai, S., Zhang, L., Torr, P.H.S., Sebe, N.: XingGAN for person image generation. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12370, pp. 717\u2013734. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58595-2_43"},{"key":"20_CR77","doi-asserted-by":"crossref","unstructured":"Tao, M., Tang, H., Wu, F., Jing, X.Y., Bao, B.K., Xu, C.: Df-gan: a simple and effective baseline for text-to-image synthesis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 16515\u201316525 (2022)","DOI":"10.1109\/CVPR52688.2022.01602"},{"key":"20_CR78","unstructured":"Wah, C., Branson, S., Welinder, P., Perona, P., Belongie, S.: The caltech-ucsd birds-200-2011 dataset (2011)"},{"key":"20_CR79","unstructured":"Wang, T., et al.: Pretraining is All You Need for Image-to-Image Translation (2022)"},{"key":"20_CR80","doi-asserted-by":"crossref","unstructured":"Wang, W., Guo, R., Tian, Y., Yang, W.: Cfsnet: toward a controllable feature space for image restoration. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4140\u20134149 (2019)","DOI":"10.1109\/ICCV.2019.00424"},{"key":"20_CR81","doi-asserted-by":"crossref","unstructured":"Xia, Y., Monica, J., Chao, W.L., Hariharan, B., Weinberger, K.Q., Campbell, M.: Image-to-image translation for autonomous driving from coarsely-aligned image pairs. In: 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 7756\u20137762. 
IEEE (2023)","DOI":"10.1109\/ICRA48891.2023.10160815"},{"key":"20_CR82","doi-asserted-by":"crossref","unstructured":"Xu, T., Zhang, P., Huang, Q., Zhang, H., Gan, Z., Huang, X., He, X.: Attngan: fine-grained text to image generation with attentional generative adversarial networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1316\u20131324 (2018)","DOI":"10.1109\/CVPR.2018.00143"},{"key":"20_CR83","doi-asserted-by":"crossref","unstructured":"Xu, X., Wang, Z., Zhang, G., Wang, K., Shi, H.: Versatile diffusion: text, images and variations all in one diffusion model. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 7754\u20137765 (2023)","DOI":"10.1109\/ICCV51070.2023.00713"},{"key":"20_CR84","doi-asserted-by":"crossref","unstructured":"Yang, B., Gu, S., Zhang, B., Zhang, T., Chen, X., Sun, X., Chen, D., Wen, F.: Paint by example: Exemplar-based image editing with diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 18381\u201318391 (2023)","DOI":"10.1109\/CVPR52729.2023.01763"},{"key":"20_CR85","unstructured":"Yu, J., et al.: Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2(3), 5 (2022)"},{"key":"20_CR86","doi-asserted-by":"crossref","unstructured":"Zhang, H., et al.: Stackgan: text to photo-realistic image synthesis with stacked generative adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 5907\u20135915 (2017)","DOI":"10.1109\/ICCV.2017.629"},{"key":"20_CR87","doi-asserted-by":"crossref","unstructured":"Zhang, H., et al.: Avatarverse: High-quality & stable 3d avatar creation from text and pose. arXiv preprint arXiv:2308.03610 (2023)","DOI":"10.1609\/aaai.v38i7.28540"},{"key":"20_CR88","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 3836\u20133847 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"20_CR89","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"20_CR90","unstructured":"Zhao, S., Chen, D., Chen, Y.C., Bao, J., Hao, S., Yuan, L., Wong, K.Y.K.: Uni-controlnet: All-in-one control to text-to-image diffusion models. Advances in Neural Information Processing Systems 36 (2024)"},{"key":"20_CR91","unstructured":"Zhao, Y., et al.: Local Conditional Controlling for Text-to-Image Diffusion Models. arXiv preprint arXiv:2312.08768 (2023)"},{"key":"20_CR92","unstructured":"Zhao, Y., Xie, E., Hong, L., Li, Z., Lee, G.H.: Make-a-protagonist: generic video editing with an ensemble of experts. arXiv preprint arXiv:2305.08850 (2023)"},{"key":"20_CR93","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 
2223\u20132232 (2017)","DOI":"10.1109\/ICCV.2017.244"},{"key":"20_CR94","doi-asserted-by":"crossref","unstructured":"Zhu, M., Pan, P., Chen, W., Yang, Y.: Dm-gan: dynamic memory generative adversarial networks for text-to-image synthesis. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5802\u20135810 (2019)","DOI":"10.1109\/CVPR.2019.00595"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73223-2_20","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T19:07:08Z","timestamp":1731006428000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73223-2_20"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,8]]},"ISBN":["9783031732225","9783031732232"],"references-count":94,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73223-2_20","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,8]]},"assertion":[{"value":"8 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}