{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,25]],"date-time":"2025-09-25T17:09:36Z","timestamp":1758820176579,"version":"3.40.3"},"publisher-location":"Cham","reference-count":44,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031732416"},{"type":"electronic","value":"9783031732423"}],"license":[{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,29]],"date-time":"2024-10-29T00:00:00Z","timestamp":1730160000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-73242-3_17","type":"book-chapter","created":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T09:15:43Z","timestamp":1730106943000},"page":"297-313","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["MultiGen: Zero-Shot Image Generation from\u00a0Multi-modal Prompts"],"prefix":"10.1007","author":[{"given":"Zhi-Fan","family":"Wu","sequence":"first","affiliation":[]},{"given":"Lianghua","family":"Huang","sequence":"additional","affiliation":[]},{"given":"Wei","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yanheng","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Yu","family":"Liu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,29]]},"reference":[{"key":"17_CR1","doi-asserted-by":"crossref","unstructured":"Brooks, T., Holynski, A., Efros, A.A.: InstructPix2Pix: learning to follow image editing instructions. arXiv:2211.09800 (2022)","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"17_CR2","doi-asserted-by":"crossref","unstructured":"Caron, M., et al.: Emerging properties in self-supervised vision transformers. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9650\u20139660 (2021)","DOI":"10.1109\/ICCV48922.2021.00951"},{"key":"17_CR3","unstructured":"Chang, H., et al.: Muse: Text-to-image generation via masked generative transformers. In: Proceedings of the 40th International Conference on Machine Learning (ICML), pp. 4055\u20134075 (2023)"},{"key":"17_CR4","unstructured":"Chen, W., et al.: Subject-driven text-to-image generation via apprenticeship learning. arXiv:2304.00186 (2023)"},{"key":"17_CR5","unstructured":"Chen, W., Hu, H., Saharia, C., Cohen, W.W.: Re-imagen: retrieval-augmented text-to-image generator. arXiv:2209.14491 (2022)"},{"key":"17_CR6","unstructured":"Dhariwal, P., Nichol, A.Q.: Diffusion models beat GANs on image synthesis. In: Advances in Neural Information Processing Systems (NeurIPS), vol. 34, pp. 8780\u20138794 (2021)"},{"key":"17_CR7","unstructured":"Dosovitskiy, A., et al.: An image is worth 16$$\\times $$16 words: transformers for image recognition at scale. 
In: 9th International Conference on Learning Representations (ICLR) (2021)"},{"key":"17_CR8","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"89","DOI":"10.1007\/978-3-031-19784-0_6","volume-title":"Computer Vision \u2013 ECCV 2022","author":"O Gafni","year":"2022","unstructured":"Gafni, O., Polyak, A., Ashual, O., Sheynin, S., Parikh, D., Taigman, Y.: Make-a-scene: scene-based text-to-image generation with human priors. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13675, pp. 89\u2013106. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19784-0_6"},{"key":"17_CR9","unstructured":"Gal, R., et al.: An image is worth one word: personalizing text-to-image generation using textual inversion. arXiv:2208.01618 (2022)"},{"key":"17_CR10","unstructured":"Hendrycks, D., Gimpel, K.: Gaussian error linear units (GELUs). arXiv:1606.08415 (2016)"},{"key":"17_CR11","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: Advances in Neural Information Processing Systems (NeurIPS), vol. 33, pp. 6840\u20136851 (2020)"},{"key":"17_CR12","unstructured":"Huang, L., Chen, D., Liu, Y., Shen, Y., Zhao, D., Zhou, J.: Composer: creative and controllable image synthesis with composable conditions. In: Proceedings of the 40th International Conference on Machine Learning (ICML), pp. 13753\u201313773 (2023)"},{"key":"17_CR13","doi-asserted-by":"crossref","unstructured":"Kang, M., et al.: Scaling up GANs for text-to-image synthesis. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10124\u201310134 (2023)","DOI":"10.1109\/CVPR52729.2023.00976"},{"key":"17_CR14","doi-asserted-by":"crossref","unstructured":"Kumari, N., Zhang, B., Zhang, R., Shechtman, E., Zhu, J.Y.: Multi-concept customization of text-to-image diffusion. arXiv:2212.04488 (2022)","DOI":"10.1109\/CVPR52729.2023.00192"},{"key":"17_CR15","unstructured":"Li, D., Li, J., Hoi, S.: Blip-diffusion: pre-trained subject representation for controllable text-to-image generation and editing. arXiv:2305.14720 (2023)"},{"key":"17_CR16","unstructured":"Li, W., Wang, L., Li, W., Agustsson, E., Gool, L.V.: Webvision database: visual learning and understanding from web data. arXiv:1708.02862 (2017)"},{"key":"17_CR17","doi-asserted-by":"crossref","unstructured":"Li, Y., et al.: GLIGEN: open-set grounded text-to-image generation. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 22511\u201322521 (2023)","DOI":"10.1109\/CVPR52729.2023.02156"},{"key":"17_CR18","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"17_CR19","doi-asserted-by":"crossref","unstructured":"Liu, S., et al.: Grounding DINO: marrying DINO with grounded pre-training for open-set object detection. arXiv:2303.05499 (2023)","DOI":"10.1007\/978-3-031-72970-6_3"},{"key":"17_CR20","unstructured":"Liu, Z., et al.: Cones: concept neurons in diffusion models for customized generation. In: Proceedings of the 40th International Conference on Machine Learning (ICML), pp. 
21548\u201321566 (2023)"},{"key":"17_CR21","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: 5th International Conference on Learning Representations (ICLR) (2017)"},{"key":"17_CR22","doi-asserted-by":"crossref","unstructured":"Mou, C., et al.: T2I-adapter: learning adapters to dig out more controllable ability for text-to-image diffusion models. arXiv:2302.08453 (2023)","DOI":"10.1609\/aaai.v38i5.28226"},{"key":"17_CR23","unstructured":"Nichol, A.Q., et al.: GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In: Proceedings of the 39th International Conference on Machine Learning (ICML), pp. 16784\u201316804 (2022)"},{"key":"17_CR24","unstructured":"Pan, X., Dong, L., Huang, S., Peng, Z., Chen, W., Wei, F.: Kosmos-G: generating images in context with multimodal large language models. arXiv:2310.02992 (2023)"},{"key":"17_CR25","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: Proceedings of the 38th International Conference on Machine Learning (ICML), pp. 8748\u20138763 (2021)"},{"key":"17_CR26","unstructured":"Ramesh, A., Dhariwal, P., Nichol, A., Chu, C., Chen, M.: Hierarchical text-conditional image generation with clip latents. arXiv:2204.06125 (2022)"},{"key":"17_CR27","unstructured":"Ramesh, A., et al.: Zero-shot text-to-image generation. In: Proceedings of the 38th International Conference on Machine Learning (ICML), pp. 8821\u20138831 (2021)"},{"key":"17_CR28","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"17_CR29","doi-asserted-by":"crossref","unstructured":"Ruiz, N., Li, Y., Jampani, V., Pritch, Y., Rubinstein, M., Aberman, K.: DreamBooth: fine tuning text-to-image diffusion models for subject-driven generation. arXiv:2208.12242 (2022)","DOI":"10.1109\/CVPR52729.2023.02155"},{"issue":"3","key":"17_CR30","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., et al.: ImageNet large scale visual recognition challenge. Int. J. Comput. Vision 115(3), 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vision"},{"key":"17_CR31","doi-asserted-by":"crossref","unstructured":"Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E., Ghasemipour, S.K.S., Ayan, B.K., Mahdavi, S.S., Lopes, R.G., et\u00a0al.: Photorealistic text-to-image diffusion models with deep language understanding. arXiv:2205.11487 (2022)","DOI":"10.1145\/3528233.3530757"},{"key":"17_CR32","unstructured":"Sauer, A., Karras, T., Laine, S., Geiger, A., Aila, T.: StyleGAN-T: unlocking the power of GANs for fast large-scale text-to-image synthesis. In: Proceedings of the 40th International Conference on Machine Learning (ICML), pp. 30105\u201330118 (2023)"},{"key":"17_CR33","unstructured":"Schuhmann, C., et al.: LAION-400M: open dataset of clip-filtered 400 million image-text pairs. arXiv:2111.02114 (2021)"},{"key":"17_CR34","unstructured":"Sheynin, S., et al.: kNN-diffusion: image generation via large-scale retrieval. 
In: 11th International Conference on Learning Representations (ICLR) (2023)"},{"key":"17_CR35","unstructured":"Sohl-Dickstein, J., Weiss, E.A., Maheswaranathan, N., Ganguli, S.: Deep unsupervised learning using nonequilibrium thermodynamics. In: Proceedings of the 32nd International Conference on Machine Learning (ICML), vol.\u00a037, pp. 2256\u20132265 (2015)"},{"key":"17_CR36","unstructured":"Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. In: 9th International Conference on Learning Representations (ICLR) (2021)"},{"key":"17_CR37","unstructured":"Song, Y., Ermon, S.: Generative modeling by estimating gradients of the data distribution. In: Advances in Neural Information Processing Systems (NeurIPS), vol. 32, pp. 11895\u201311907 (2019)"},{"key":"17_CR38","doi-asserted-by":"crossref","unstructured":"Su, Z., et al.: Pixel difference networks for efficient edge detection. In: IEEE International Conference on Computer Vision (ICCV), pp. 5097\u20135107 (2021)","DOI":"10.1109\/ICCV48922.2021.00507"},{"key":"17_CR39","unstructured":"Sun, Q., et al.: Generative multimodal models are in-context learners. arXiv:2312.13286 (2023)"},{"key":"17_CR40","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems (NeurIPS), vol. 30, pp. 5998\u20136008 (2017)"},{"key":"17_CR41","doi-asserted-by":"crossref","unstructured":"Yang, Z., et al.: ReCo: region-controlled text-to-image generation. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 14246\u201314255 (2023)","DOI":"10.1109\/CVPR52729.2023.01369"},{"key":"17_CR42","unstructured":"Yu, J., et al.: Scaling autoregressive models for content-rich text-to-image generation. Trans. Mach. Learn. Res. (2022)"},{"key":"17_CR43","doi-asserted-by":"crossref","unstructured":"Zhai, X., Mustafa, B., Kolesnikov, A., Beyer, L.: Sigmoid loss for language image pre-training. arXiv:2303.15343 (2023)","DOI":"10.1109\/ICCV51070.2023.01100"},{"key":"17_CR44","doi-asserted-by":"crossref","unstructured":"Zhang, L., Agrawala, M.: Adding conditional control to text-to-image diffusion models. 
arXiv:2302.05543 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-73242-3_17","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,30]],"date-time":"2024-11-30T10:26:38Z","timestamp":1732962398000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-73242-3_17"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,29]]},"ISBN":["9783031732416","9783031732423"],"references-count":44,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-73242-3_17","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,10,29]]},"assertion":[{"value":"29 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}