{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,12]],"date-time":"2026-02-12T17:29:07Z","timestamp":1770917347162,"version":"3.50.1"},"publisher-location":"Cham","reference-count":61,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031729515","type":"print"},{"value":"9783031729522","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,10,1]],"date-time":"2024-10-01T00:00:00Z","timestamp":1727740800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72952-2_7","type":"book-chapter","created":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T05:02:02Z","timestamp":1727672522000},"page":"110-126","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["Improving Text-Guided Object Inpainting with\u00a0Semantic Pre-inpainting"],"prefix":"10.1007","author":[{"given":"Yifu","family":"Chen","sequence":"first","affiliation":[]},{"given":"Jingwen","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Yingwei","family":"Pan","sequence":"additional","affiliation":[]},{"given":"Yehao","family":"Li","sequence":"additional","affiliation":[]},{"given":"Ting","family":"Yao","sequence":"additional","affiliation":[]},{"given":"Zhineng","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Tao","family":"Mei","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,1]]},"reference":[{"issue":"4","key":"7_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3592450","volume":"42","author":"O Avrahami","year":"2023","unstructured":"Avrahami, O., Fried, O., Lischinski, D.: Blended latent diffusion. ACM Trans. Graph. 42(4), 1\u201311 (2023)","journal-title":"ACM Trans. Graph."},{"key":"7_CR2","doi-asserted-by":"crossref","unstructured":"Avrahami, O., Lischinski, D., Fried, O.: Blended diffusion for text-driven editing of natural images. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01767"},{"issue":"8","key":"7_CR3","doi-asserted-by":"publisher","first-page":"1200","DOI":"10.1109\/83.935036","volume":"10","author":"C Ballester","year":"2001","unstructured":"Ballester, C., Bertalmio, M., Caselles, V., Sapiro, G., Verdera, J.: Filling-in by joint interpolation of vector fields and gray levels. IEEE Trans. Image Process. 10(8), 1200\u20131211 (2001)","journal-title":"IEEE Trans. Image Process."},{"issue":"3","key":"7_CR4","doi-asserted-by":"publisher","first-page":"24","DOI":"10.1145\/1531326.1531330","volume":"28","author":"C Barnes","year":"2009","unstructured":"Barnes, C., Shechtman, E., Finkelstein, A., Goldman, D.B.: PatchMatch: a randomized correspondence algorithm for structural image editing. ACM Trans. Graph. 28(3), 24 (2009)","journal-title":"ACM Trans. Graph."},{"key":"7_CR5","doi-asserted-by":"crossref","unstructured":"Bertalmio, M., Sapiro, G., Caselles, V., Ballester, C.: Image inpainting. 
In: SIGGRAPH (2000)","DOI":"10.1145\/344779.344972"},{"key":"7_CR6","doi-asserted-by":"crossref","unstructured":"Brooks, T., Holynski, A., Efros, A.A.: InstructPix2Pix: learning to follow image editing instructions. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01764"},{"key":"7_CR7","doi-asserted-by":"crossref","unstructured":"Chen, J., Pan, Y., Yao, T., Mei, T.: ControlStyle: text-driven stylized image generation using diffusion priors. In: ACM MM (2023)","DOI":"10.1145\/3581783.3612524"},{"key":"7_CR8","doi-asserted-by":"crossref","unstructured":"Chen, Y., Pan, Y., Li, Y., Yao, T., Mei, T.: Control3D: towards controllable text-to-3D generation. In: ACM MM (2023)","DOI":"10.1145\/3581783.3612489"},{"issue":"9","key":"7_CR9","doi-asserted-by":"publisher","first-page":"1200","DOI":"10.1109\/TIP.2004.833105","volume":"13","author":"A Criminisi","year":"2004","unstructured":"Criminisi, A., P\u00e9rez, P., Toyama, K.: Region filling and object removal by exemplar-based image inpainting. IEEE Trans. Image Process. 13(9), 1200\u20131212 (2004)","journal-title":"IEEE Trans. Image Process."},{"key":"7_CR10","unstructured":"Dhariwal, P., Nichol, A.: Diffusion models beat GANs on image synthesis. In: NeurIPS, vol. 34, pp. 8780\u20138794 (2021)"},{"key":"7_CR11","doi-asserted-by":"crossref","unstructured":"Feng, Z., et\u00a0al.: ERNIE-ViLG 2.0: improving text-to-image diffusion model with knowledge-enhanced mixture-of-denoising-experts. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00977"},{"issue":"11","key":"7_CR12","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1145\/3422622","volume":"63","author":"I Goodfellow","year":"2020","unstructured":"Goodfellow, I., et al.: Generative adversarial networks. Commun. ACM 63(11), 139\u2013144 (2020)","journal-title":"Commun. ACM"},{"key":"7_CR13","unstructured":"Hertz, A., Mokady, R., Tenenbaum, J., Aberman, K., Pritch, Y., Cohen-Or, D.: Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626 (2022)"},{"key":"7_CR14","doi-asserted-by":"crossref","unstructured":"Hessel, J., Holtzman, A., Forbes, M., Bras, R.L., Choi, Y.: CLIPScore: a reference-free evaluation metric for image captioning. arXiv preprint arXiv:2104.08718 (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"7_CR15","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local nash equilibrium. In: NeurIPS (2017)"},{"key":"7_CR16","unstructured":"Ho, J., et\u00a0al.: Imagen video: high definition video generation with diffusion models. arXiv preprint arXiv:2210.02303 (2022)"},{"key":"7_CR17","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: NeurIPS (2020)"},{"key":"7_CR18","unstructured":"Ho, J., Salimans, T.: Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598 (2022)"},{"key":"7_CR19","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)"},{"key":"7_CR20","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114 (2013)"},{"issue":"7","key":"7_CR21","doi-asserted-by":"publisher","first-page":"1956","DOI":"10.1007\/s11263-020-01316-z","volume":"128","author":"A Kuznetsova","year":"2020","unstructured":"Kuznetsova, A., et al.: The open images dataset V4: unified image classification, object detection, and visual relationship detection at scale. Int. J. Comput. 
Vision 128(7), 1956\u20131981 (2020)","journal-title":"Int. J. Comput. Vision"},{"key":"7_CR22","doi-asserted-by":"crossref","unstructured":"Li, W., Lin, Z., Zhou, K., Qi, L., Wang, Y., Jia, J.: MAT: mask-aware transformer for large hole image inpainting. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01049"},{"issue":"2","key":"7_CR23","doi-asserted-by":"publisher","first-page":"1489","DOI":"10.1109\/TPAMI.2022.3164083","volume":"45","author":"Y Li","year":"2022","unstructured":"Li, Y., Yao, T., Pan, Y., Mei, T.: Contextual transformer networks for visual recognition. IEEE Trans. Pattern Anal. Mach. Intell. 45(2), 1489\u20131500 (2022)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"7_CR24","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"T-Y Lin","year":"2014","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"7_CR25","doi-asserted-by":"crossref","unstructured":"Liu, G., Reda, F.A., Shih, K.J., Wang, T., Tao, A., Catanzaro, B.: Image inpainting for irregular holes using partial convolutions. In: ECCV (2018)","DOI":"10.1007\/978-3-030-01252-6_6"},{"key":"7_CR26","doi-asserted-by":"crossref","unstructured":"Liu, H., Wan, Z., Huang, W., Song, Y., Han, X., Liao, J.: PD-GAN: probabilistic diverse GAN for image inpainting. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00925"},{"key":"7_CR27","unstructured":"Navaneet, K., Koohpayegani, S.A., Tejankar, A., Pirsiavash, H.: SimReg: regression as a simple yet effective tool for self-supervised knowledge distillation. arXiv preprint arXiv:2201.05131 (2022)"},{"key":"7_CR28","unstructured":"Nichol, A., et al.: GLIDE: towards photorealistic image generation and editing with text-guided diffusion models. arXiv preprint arXiv:2112.10741 (2021)"},{"key":"7_CR29","doi-asserted-by":"crossref","unstructured":"Pan, Y., Qiu, Z., Yao, T., Li, H., Mei, T.: To create what you tell: generating videos from captions. In: ACM MM (2017)","DOI":"10.1145\/3123266.3127905"},{"key":"7_CR30","doi-asserted-by":"crossref","unstructured":"Peng, J., Liu, D., Xu, S., Li, H.: Generating diverse structure for image inpainting with hierarchical VQ-VAE. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01063"},{"key":"7_CR31","unstructured":"von Platen, P., et al.: Diffusers: state-of-the-art diffusion models (2022). https:\/\/github.com\/huggingface\/diffusers"},{"key":"7_CR32","unstructured":"Poole, B., Jain, A., Barron, J.T., Mildenhall, B.: DreamFusion: text-to-3D using 2D diffusion. arXiv preprint arXiv:2209.14988 (2022)"},{"key":"7_CR33","doi-asserted-by":"publisher","first-page":"2405","DOI":"10.1109\/TIP.2022.3152624","volume":"31","author":"W Quan","year":"2022","unstructured":"Quan, W., Zhang, R., Zhang, Y., Li, Z., Wang, J., Yan, D.: Image inpainting with local and global refinement. IEEE Trans. Image Process. 31, 2405\u20132420 (2022)","journal-title":"IEEE Trans. Image Process."},{"key":"7_CR34","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. 
In: ICML (2021)"},{"key":"7_CR35","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"7_CR36","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"234","DOI":"10.1007\/978-3-319-24574-4_28","volume-title":"Medical Image Computing and Computer-Assisted Intervention \u2013 MICCAI 2015","author":"O Ronneberger","year":"2015","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) MICCAI 2015. LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28"},{"key":"7_CR37","unstructured":"Saharia, C., et al.: Photorealistic text-to-image diffusion models with deep language understanding. In: NeurIPS, vol. 35, pp. 36479\u201336494 (2022)"},{"key":"7_CR38","unstructured":"Seitzer, M.: pytorch-fid: FID score for PyTorch (2020). https:\/\/github.com\/mseitzer\/pytorch-fid. Version 0.3.0"},{"key":"7_CR39","unstructured":"Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502 (2020)"},{"key":"7_CR40","doi-asserted-by":"crossref","unstructured":"Song, Y., et al.: Contextual-based image inpainting: infer, match, and translate. In: ECCV (2018)","DOI":"10.1007\/978-3-030-01216-8_1"},{"key":"7_CR41","doi-asserted-by":"crossref","unstructured":"Tang, J., et al.: Make-it-3D: high-fidelity 3D creation from a single image with diffusion prior. arXiv preprint arXiv:2303.14184 (2023)","DOI":"10.1109\/ICCV51070.2023.02086"},{"key":"7_CR42","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS, vol. 30 (2017)"},{"key":"7_CR43","doi-asserted-by":"crossref","unstructured":"Wei, C., Fan, H., Xie, S., Wu, C.Y., Yuille, A., Feichtenhofer, C.: Masked feature prediction for self-supervised visual pre-training. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01426"},{"key":"7_CR44","doi-asserted-by":"crossref","unstructured":"Wu, J.Z., et al.: Tune-a-video: one-shot tuning of image diffusion models for text-to-video generation. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00701"},{"key":"7_CR45","doi-asserted-by":"crossref","unstructured":"Xie, S., Zhang, Z., Lin, Z., Hinz, T., Zhang, K.: SmartBrush: text and shape guided object inpainting with diffusion model. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02148"},{"key":"7_CR46","unstructured":"Xue, Z., et al.: RAPHAEL: text-to-image generation via large mixture of diffusion paths. arXiv preprint arXiv:2305.18295 (2023)"},{"key":"7_CR47","doi-asserted-by":"crossref","unstructured":"Yang, C., Lu, X., Lin, Z., Shechtman, E., Wang, O., Li, H.: High-resolution image inpainting using multi-scale neural patch synthesis. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.434"},{"key":"7_CR48","doi-asserted-by":"crossref","unstructured":"Yao, T., Li, Y., Pan, Y., Mei, T.: HIRI-ViT: scaling vision transformer with high resolution inputs. IEEE Trans. Pattern Anal. Mach. Intell. (2024)","DOI":"10.1109\/TPAMI.2024.3379457"},{"issue":"9","key":"7_CR49","doi-asserted-by":"publisher","first-page":"10870","DOI":"10.1109\/TPAMI.2023.3268446","volume":"45","author":"T Yao","year":"2023","unstructured":"Yao, T., Li, Y., Pan, Y., Wang, Y., Zhang, X.P., Mei, T.: Dual vision transformer. IEEE Trans. Pattern Anal. Mach. Intell. 
45(9), 10870\u201310882 (2023)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"7_CR50","doi-asserted-by":"crossref","unstructured":"Yi, Z., Tang, Q., Azizi, S., Jang, D., Xu, Z.: Contextual residual aggregation for ultra high-resolution image inpainting. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00753"},{"key":"7_CR51","doi-asserted-by":"crossref","unstructured":"Yu, J., Lin, Z., Yang, J., Shen, X., Lu, X., Huang, T.S.: Generative image inpainting with contextual attention. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00577"},{"key":"7_CR52","doi-asserted-by":"crossref","unstructured":"Yu, J., Lin, Z., Yang, J., Shen, X., Lu, X., Huang, T.S.: Free-form image inpainting with gated convolution. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00457"},{"key":"7_CR53","doi-asserted-by":"crossref","unstructured":"Zeng, Y., Fu, J., Chao, H., Guo, B.: Learning pyramid-context encoder network for high-quality image inpainting. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00158"},{"key":"7_CR54","doi-asserted-by":"crossref","unstructured":"Zhang, L., Chen, Q., Hu, B., Jiang, S.: Text-guided neural image inpainting. In: ACM MM (2020)","DOI":"10.1145\/3394171.3414017"},{"key":"7_CR55","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: ICCV (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"7_CR56","doi-asserted-by":"crossref","unstructured":"Zhang, Z., et al.: TRIP: temporal residual learning with image noise prior for image-to-video diffusion models. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00828"},{"key":"7_CR57","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Zhao, Z., Zhang, Z., Huai, B., Yuan, J.: Text-guided image inpainting. In: ACM MM (2020)","DOI":"10.1145\/3394171.3414017"},{"key":"7_CR58","doi-asserted-by":"crossref","unstructured":"Zhao, L., et al.: UCTGAN: diverse image inpainting based on unsupervised cross-space translation. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00578"},{"key":"7_CR59","unstructured":"Zhao, S., et al.: Uni-ControlNet: all-in-one control to text-to-image diffusion models. arXiv preprint arXiv:2305.16322 (2023)"},{"key":"7_CR60","doi-asserted-by":"crossref","unstructured":"Zheng, C., Cham, T.J., Cai, J.: Pluralistic image completion. In: CVPR (2019)","DOI":"10.1109\/CVPR.2019.00153"},{"key":"7_CR61","doi-asserted-by":"crossref","unstructured":"Zhu, R., et al.: SD-DiT: unleashing the power of self-supervised discrimination in diffusion transformer. 
In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00806"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72952-2_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,30]],"date-time":"2024-09-30T05:08:37Z","timestamp":1727672917000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72952-2_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,1]]},"ISBN":["9783031729515","9783031729522"],"references-count":61,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72952-2_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,10,1]]},"assertion":[{"value":"1 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}