{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,10]],"date-time":"2026-04-10T18:19:47Z","timestamp":1775845187053,"version":"3.50.1"},"publisher-location":"Cham","reference-count":56,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031726606","type":"print"},{"value":"9783031726613","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:00Z","timestamp":1732665600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T00:00:00Z","timestamp":1732665600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72661-3_9","type":"book-chapter","created":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T07:52:40Z","timestamp":1732607560000},"page":"150-168","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":54,"title":["BrushNet: A Plug-and-Play Image Inpainting Model with\u00a0Decomposed Dual-Branch 
Diffusion"],"prefix":"10.1007","author":[{"given":"Xuan","family":"Ju","sequence":"first","affiliation":[]},{"given":"Xian","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Xintao","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yuxuan","family":"Bian","sequence":"additional","affiliation":[]},{"given":"Ying","family":"Shan","sequence":"additional","affiliation":[]},{"given":"Qiang","family":"Xu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,27]]},"reference":[{"issue":"4","key":"9_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3592450","volume":"42","author":"O Avrahami","year":"2023","unstructured":"Avrahami, O., Fried, O., Lischinski, D.: Blended latent diffusion. ACM Trans. Graph. (TOG) 42(4), 1\u201311 (2023)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"9_CR2","doi-asserted-by":"crossref","unstructured":"Avrahami, O., Lischinski, D., Fried, O.: Blended diffusion for text-driven editing of natural images. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 18208\u201318218 (2022)","DOI":"10.1109\/CVPR52688.2022.01767"},{"key":"9_CR3","doi-asserted-by":"crossref","unstructured":"Bertalmio, M., Sapiro, G., Caselles, V., Ballester, C.: Image inpainting. In: International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), pp. 417\u2013424 (2000)","DOI":"10.1145\/344779.344972"},{"key":"9_CR4","unstructured":"Binghui, C., Chao, L., Chongyang, Z., Wangmeng, X., Yifeng, G., Xuansong, X.: Replaceanything as you want: Ultra-high quality content replacement (2023). https:\/\/aigcdesigngroup.github.io\/replace-anything\/"},{"key":"9_CR5","unstructured":"Bi\u0144kowski, M., Sutherland, D.J., Arbel, M., Gretton, A.: Demystifying MMD GANs. 
arXiv preprint arXiv:1801.01401 (2018)"},{"key":"9_CR6","doi-asserted-by":"crossref","unstructured":"Corneanu, C., Gadde, R., Martinez, A.M.: Latentpaint: image inpainting in latent space with diffusion models. In: IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), pp. 4334\u20134343 (2024)","DOI":"10.1109\/WACV57701.2024.00428"},{"issue":"9","key":"9_CR7","doi-asserted-by":"publisher","first-page":"1200","DOI":"10.1109\/TIP.2004.833105","volume":"13","author":"A Criminisi","year":"2004","unstructured":"Criminisi, A., P\u00e9rez, P., Toyama, K.: Region filling and object removal by exemplar-based image inpainting. IEEE Trans. Image Process. 13(9), 1200\u20131212 (2004)","journal-title":"IEEE Trans. Image Process."},{"key":"9_CR8","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: a large-scale hierarchical image database. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 248\u2013255. IEEE (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"9_CR9","unstructured":"epinikion: epicrealism (2023). https:\/\/civitai.com\/models\/25694?modelVersionId=143906"},{"key":"9_CR10","unstructured":"heni29833: Henmixreal (2024). https:\/\/civitai.com\/models\/20282?modelVersionId=305687"},{"key":"9_CR11","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local Nash equilibrium. Advances in Neural Information Processing Systems (NIPS) 30 (2017)"},{"key":"9_CR12","first-page":"6840","volume":"33","author":"J Ho","year":"2020","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. 
Advances in Neural Information Processing Systems (NIPS) 33, 6840\u20136851 (2020)","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"9_CR13","unstructured":"Ho, J., Salimans, T.: Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598 (2022)"},{"key":"9_CR14","unstructured":"Huang, H., He, R., Sun, Z., Tan, T., et\u00a0al.: Introvae: introspective variational autoencoders for photographic image synthesis. Advances in Neural Information Processing Systems (NIPS) 31 (2018)"},{"key":"9_CR15","unstructured":"Huang, Y., et al.: Diffusion model-based image editing: A survey. arXiv preprint arXiv:2402.17525 (2024)"},{"key":"9_CR16","doi-asserted-by":"crossref","unstructured":"Jayasumana, S., Ramalingam, S., Veit, A., Glasner, D., Chakrabarti, A., Kumar, S.: Rethinking fid: Towards a better evaluation metric for image generation. arXiv preprint arXiv:2401.09603 (2023)","DOI":"10.1109\/CVPR52733.2024.00889"},{"issue":"7","key":"9_CR17","doi-asserted-by":"publisher","first-page":"1956","DOI":"10.1007\/s11263-020-01316-z","volume":"128","author":"A Kuznetsova","year":"2020","unstructured":"Kuznetsova, A., et al.: The open images dataset v4: unified image classification, object detection, and visual relationship detection at scale. Int. J. Comput. Vis. (IJCV) 128(7), 1956\u20131981 (2020)","journal-title":"Int. J. Comput. Vis. (IJCV)"},{"key":"9_CR18","doi-asserted-by":"crossref","unstructured":"Li, Z., Wei, P., Yin, X., Ma, Z., Kot, A.C.: Virtual try-on with pose-garment keypoints guided inpainting. In: IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 22788\u201322797 (2023)","DOI":"10.1109\/ICCV51070.2023.02083"},{"key":"9_CR19","doi-asserted-by":"crossref","unstructured":"Lin, T.Y., et al.: Microsoft coco: Common objects in context. In: European Conference on Computer Vision (ECCV), pp. 740\u2013755. 
Springer (2014)","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"9_CR20","unstructured":"Liu, A., Niepert, M., Broeck, G.V.d.: Image inpainting via tractable steering of diffusion models. arXiv preprint arXiv:2401.03349 (2023)"},{"key":"9_CR21","doi-asserted-by":"crossref","unstructured":"Liu, H., Wan, Z., Huang, W., Song, Y., Han, X., Liao, J.: Pd-GAN: probabilistic diverse GAN for image inpainting. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9371\u20139381 (2021)","DOI":"10.1109\/CVPR46437.2021.00925"},{"key":"9_CR22","doi-asserted-by":"crossref","unstructured":"Liu, Z., Luo, P., Wang, X., Tang, X.: Deep learning face attributes in the wild. In: IEEE\/CVF International Conference on Computer Vision (ICCV), December 2015","DOI":"10.1109\/ICCV.2015.425"},{"key":"9_CR23","doi-asserted-by":"crossref","unstructured":"Lugmayr, A., Danelljan, M., Romero, A., Yu, F., Timofte, R., Van\u00a0Gool, L.: RePaint: Inpainting using denoising diffusion probabilistic models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11461\u201311471 (2022)","DOI":"10.1109\/CVPR52688.2022.01117"},{"key":"9_CR24","unstructured":"Lykon: Dreamshaper (2022). https:\/\/civitai.com\/models\/4384?modelVersionId=128713"},{"key":"9_CR25","unstructured":"Manukyan, H., Sargsyan, A., Atanyan, B., Wang, Z., Navasardyan, S., Shi, H.: Hd-painter: high-resolution and prompt-faithful text-guided image inpainting with diffusion models. arXiv preprint arXiv:2312.14091 (2023)"},{"key":"9_CR26","unstructured":"Meina: Meinamix (2023). https:\/\/civitai.com\/models\/7240?modelVersionId=119057"},{"key":"9_CR27","doi-asserted-by":"crossref","unstructured":"Peng, J., Liu, D., Xu, S., Li, H.: Generating diverse structure for image inpainting with hierarchical vq-vae. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 
10775\u201310784 (2021)","DOI":"10.1109\/CVPR46437.2021.01063"},{"key":"9_CR28","unstructured":"von Platen, P., et al.: Diffusers: State-of-the-art diffusion models. https:\/\/github.com\/huggingface\/diffusers (2022)"},{"key":"9_CR29","unstructured":"Quan, W., Chen, J., Liu, Y., Yan, D.M., Wonka, P.: Deep learning-based image and video inpainting: a survey. Int. J. Comput. Vis. (IJCV) pp. 1\u201334 (2024)"},{"key":"9_CR30","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning (ICML), pp. 8748\u20138763. PMLR (2021)"},{"key":"9_CR31","doi-asserted-by":"crossref","unstructured":"Razzhigaev, A., et al.: Kandinsky: an improved text-to-image synthesis with image prior and latent diffusion. arXiv preprint arXiv:2310.03502 (2023)","DOI":"10.18653\/v1\/2023.emnlp-demo.25"},{"key":"9_CR32","unstructured":"Ren, T., et\u00a0al.: Grounded sam: assembling open-world models for diverse visual tasks. arXiv preprint arXiv:2401.14159 (2024)"},{"key":"9_CR33","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 10684\u201310695, June 2022","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"9_CR34","first-page":"25278","volume":"35","author":"C Schuhmann","year":"2022","unstructured":"Schuhmann, C., et al.: Laion-5b: an open large-scale dataset for training next generation image-text models. Advances in Neural Information Processing Systems (NIPS) 35, 25278\u201325294 (2022)","journal-title":"Advances in Neural Information Processing Systems (NIPS)"},{"key":"9_CR35","unstructured":"SG161222: Realisticvision (2023). 
https:\/\/civitai.com\/models\/4201?modelVersionId=130072"},{"key":"9_CR36","unstructured":"Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502 (2020)"},{"key":"9_CR37","doi-asserted-by":"crossref","unstructured":"Wang, S., et\u00a0al.: Imagen editor and editbench: advancing and evaluating text-guided image inpainting. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 18359\u201318369 (2023)","DOI":"10.1109\/CVPR52729.2023.01761"},{"key":"9_CR38","unstructured":"Wikipedia contributors: Mean squared error \u2014 Wikipedia, the free encyclopedia (2024). https:\/\/en.wikipedia.org\/w\/index.php?title=Mean_squared_error&oldid=1207422018. Accessed 4 Mar 2024"},{"key":"9_CR39","unstructured":"Wikipedia contributors: Peak signal-to-noise ratio \u2014 Wikipedia, the free encyclopedia (2024). https:\/\/en.wikipedia.org\/w\/index.php?title=Peak_signal-to-noise_ratio&oldid=1210897995. Accessed 4 Mar 2024"},{"key":"9_CR40","unstructured":"Wu, C., et al.: GODIVA: generating open-domain videos from natural descriptions. arXiv preprint arXiv:2104.14806 (2021)"},{"key":"9_CR41","unstructured":"Wu, X., et al.: Human preference score v2: a solid benchmark for evaluating human preferences of text-to-image synthesis. arXiv preprint arXiv:2306.09341 (2023)"},{"key":"9_CR42","doi-asserted-by":"crossref","unstructured":"Xie, S., Zhang, Z., Lin, Z., Hinz, T., Zhang, K.: Smartbrush: text and shape guided object inpainting with diffusion model. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 22428\u201322437 (2023)","DOI":"10.1109\/CVPR52729.2023.02148"},{"key":"9_CR43","unstructured":"Xie, S., et al.: Dreaminpainter: text-guided subject-driven image inpainting with diffusion models. 
arXiv preprint arXiv:2312.03771 (2023)"},{"key":"9_CR44","unstructured":"Xu, J., et al.: Imagereward: Learning and evaluating human preferences for text-to-image generation (2023)"},{"issue":"20","key":"9_CR45","doi-asserted-by":"publisher","first-page":"11189","DOI":"10.3390\/app132011189","volume":"13","author":"Z Xu","year":"2023","unstructured":"Xu, Z., Zhang, X., Chen, W., Yao, M., Liu, J., Xu, T., Wang, Z.: A review of image inpainting methods based on deep learning. Appl. Sci. 13(20), 11189 (2023)","journal-title":"Appl. Sci."},{"key":"9_CR46","doi-asserted-by":"crossref","unstructured":"Yang, S., Chen, X., Liao, J.: Uni-paint: a unified framework for multimodal image inpainting with pretrained diffusion model. In: ACM International Conference on Multimedia (MM), pp. 3190\u20133199 (2023)","DOI":"10.1145\/3581783.3612200"},{"key":"9_CR47","unstructured":"Yang, S., Zhang, L., Ma, L., Liu, Y., Fu, J., He, Y.: Magicremover: tuning-free text-guided image inpainting with diffusion models. arXiv preprint arXiv:2310.02848 (2023)"},{"key":"9_CR48","unstructured":"Yu, F., Seff, A., Zhang, Y., Song, S., Funkhouser, T., Xiao, J.: Lsun: construction of a large-scale image dataset using deep learning with humans in the loop. arXiv preprint arXiv:1506.03365 (2015)"},{"key":"9_CR49","unstructured":"Yu, T., et al.: Inpaint anything: segment anything meets image inpainting. arXiv preprint arXiv:2304.06790 (2023)"},{"key":"9_CR50","unstructured":"Zhang, G., Ji, J., Zhang, Y., Yu, M., Jaakkola, T., Chang, S.: Towards coherent image inpainting using denoising diffusion implicit models (2023)"},{"key":"9_CR51","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. 
In: IEEE\/CVF International Conference on Computer Vision (ICCV) (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"9_CR52","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"9_CR53","unstructured":"Zhao, S., et al.: Large scale image completion via co-modulated generative adversarial networks. arXiv preprint arXiv:2103.10428 (2021)"},{"key":"9_CR54","doi-asserted-by":"crossref","unstructured":"Zheng, C., Cham, T.J., Cai, J.: Pluralistic image completion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1438\u20131447 (2019)","DOI":"10.1109\/CVPR.2019.00153"},{"key":"9_CR55","doi-asserted-by":"crossref","unstructured":"Zheng, H., et al.: Image inpainting with cascaded modulation GAN and object-aware training. In: European Conference on Computer Vision (ECCV), pp. 277\u2013296. Springer (2022)","DOI":"10.1007\/978-3-031-19787-1_16"},{"key":"9_CR56","doi-asserted-by":"crossref","unstructured":"Zhuang, J., Zeng, Y., Liu, W., Yuan, C., Chen, K.: A task is worth one word: Learning with task prompts for high-quality versatile image inpainting. 
arXiv preprint arXiv:2312.03594 (2023)","DOI":"10.1007\/978-3-031-73636-0_12"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72661-3_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,26]],"date-time":"2024-11-26T08:18:29Z","timestamp":1732609109000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72661-3_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,27]]},"ISBN":["9783031726606","9783031726613"],"references-count":56,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72661-3_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,27]]},"assertion":[{"value":"27 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 
2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}