{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T16:07:35Z","timestamp":1778083655759,"version":"3.51.4"},"publisher-location":"Cham","reference-count":43,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031729669","type":"print"},{"value":"9783031729676","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72967-6_11","type":"book-chapter","created":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:09:30Z","timestamp":1730574570000},"page":"184-199","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":10,"title":["Improving Virtual Try-On with\u00a0Garment-Focused Diffusion Models"],"prefix":"10.1007","author":[{"given":"Siqi","family":"Wan","sequence":"first","affiliation":[]},{"given":"Yehao","family":"Li","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7917-6003","authenticated-orcid":false,"given":"Jingwen","family":"Chen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4344-8898","authenticated-orcid":false,"given":"Yingwei","family":"Pan","sequence":"additional","affiliation":[]},{"given":"Ting","family":"Yao","sequence":"additional","affiliation":[]},{"given":"Yang","family":"Cao","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5990-7307","authenticated-orcid":false,"given":"Tao","family":"Mei","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,3]]},"reference":[{"key":"11_CR1","doi-asserted-by":"crossref","unstructured":"Bai, S., Zhou, H., Li, Z., Zhou, C., Yang, H.: Single stage virtual try-on via deformable attention flows. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19784-0_24"},{"key":"11_CR2","unstructured":"Bi\u0144kowski, M., Sutherland, D.J., Arbel, M., Gretton, A.: Demystifying mmd GANs. In: ICLR (2018)"},{"issue":"6","key":"11_CR3","doi-asserted-by":"publisher","first-page":"567","DOI":"10.1109\/34.24792","volume":"11","author":"FL Bookstein","year":"1989","unstructured":"Bookstein, F.L.: Principal warps: thin-plate splines and the decomposition of deformations. IEEE TPAMI 11(6), 567\u2013585 (1989)","journal-title":"IEEE TPAMI"},{"key":"11_CR4","doi-asserted-by":"crossref","unstructured":"Chen, J., Pan, Y., Yao, T., Mei, T.: Controlstyle: text-driven stylized image generation using diffusion priors. In: ACM MM (2023)","DOI":"10.1145\/3581783.3612524"},{"key":"11_CR5","doi-asserted-by":"crossref","unstructured":"Chen, Y., Pan, Y., Li, Y., Yao, T., Mei, T.: Control3d: towards controllable text-to-3D generation. In: ACM MM (2023)","DOI":"10.1145\/3581783.3612489"},{"key":"11_CR6","doi-asserted-by":"crossref","unstructured":"Choi, S., Park, S., Lee, M., Choo, J.: Viton-HD: high-resolution virtual try-on via misalignment-aware normalization. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01391"},{"issue":"5","key":"11_CR7","first-page":"2567","volume":"44","author":"K Ding","year":"2020","unstructured":"Ding, K., Ma, K., Wang, S., Simoncelli, E.P.: Image quality assessment: unifying structure and texture similarity. IEEE TPAMI 44(5), 2567\u20132581 (2020)","journal-title":"IEEE TPAMI"},{"key":"11_CR8","doi-asserted-by":"crossref","unstructured":"Dong, H., Liang, X., Shen, X., Wu, B., Chen, B.C., Yin, J.: FW-GAN: flow-navigated warping GAN for video virtual try-on. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00125"},{"key":"11_CR9","doi-asserted-by":"crossref","unstructured":"Fenocchi, E., Morelli, D., Cornia, M., Baraldi, L., Cesari, F., Cucchiara, R.: Dual-branch collaborative transformer for virtual try-on. In: CVPR Workshops (2022)","DOI":"10.1109\/CVPRW56347.2022.00246"},{"key":"11_CR10","doi-asserted-by":"crossref","unstructured":"Ge, Y., Song, Y., Zhang, R., Ge, C., Liu, W., Luo, P.: Parser-free virtual try-on via distilling appearance flows. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00838"},{"key":"11_CR11","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: NeurIPS (2014)"},{"key":"11_CR12","doi-asserted-by":"crossref","unstructured":"Gou, J., Sun, S., Zhang, J., Si, J., Qian, C., Zhang, L.: Taming the power of diffusion models for high-quality virtual try-on with appearance flow. In: ACM MM (2023)","DOI":"10.1145\/3581783.3612255"},{"key":"11_CR13","doi-asserted-by":"crossref","unstructured":"Gu, S., et al.: Vector quantized diffusion model for text-to-image synthesis. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01043"},{"key":"11_CR14","doi-asserted-by":"crossref","unstructured":"Han, X., Wu, Z., Wu, Z., Yu, R., Davis, L.S.: Viton: an image-based virtual try-on network. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00787"},{"key":"11_CR15","doi-asserted-by":"crossref","unstructured":"He, S., Song, Y.Z., Xiang, T.: Style-based global appearance flow for virtual try-on. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.00346"},{"key":"11_CR16","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local nash equilibrium. In: NeurIPS (2017)"},{"key":"11_CR17","unstructured":"Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In: NeurIPS (2020)"},{"key":"11_CR18","unstructured":"Ho, J., Salimans, T.: Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598 (2022)"},{"key":"11_CR19","first-page":"5","volume":"4","author":"G Ilharco","year":"2021","unstructured":"Ilharco, G., et al.: Openclip. Zenodo 4, 5 (2021)","journal-title":"Zenodo"},{"key":"11_CR20","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational bayes. In: ICLR (2014)"},{"key":"11_CR21","doi-asserted-by":"crossref","unstructured":"Lee, S., Gu, G., Park, S., Choi, S., Choo, J.: High-resolution virtual try-on with misalignment and occlusion-handled conditions. In: ECCV (2022)","DOI":"10.1007\/978-3-031-19790-1_13"},{"key":"11_CR22","doi-asserted-by":"crossref","unstructured":"Li, K., Chong, M.J., Zhang, J., Liu, J.: Toward accurate and realistic outfits visualization with attention to details. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01529"},{"key":"11_CR23","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019)"},{"key":"11_CR24","unstructured":"Minar, M.R., Tuan, T.T., Ahn, H., Rosin, P., Lai, Y.K.: CP-VTON+: clothing shape and texture preserving image-based virtual try-on. In: CVPR Workshops (2020)"},{"key":"11_CR25","doi-asserted-by":"crossref","unstructured":"Morelli, D., Baldrati, A., Cartella, G., Cornia, M., Bertini, M., Cucchiara, R.: LADI-VTON: latent diffusion textual-inversion enhanced virtual try-on. In: ACM MM (2023)","DOI":"10.1145\/3581783.3612137"},{"key":"11_CR26","doi-asserted-by":"crossref","unstructured":"Morelli, D., Fincato, M., Cornia, M., Landi, F., Cesari, F., Cucchiara, R.: Dress code: high-resolution multi-category virtual try-on. In: CVPR Workshops (2022)","DOI":"10.1109\/CVPRW56347.2022.00243"},{"key":"11_CR27","doi-asserted-by":"crossref","unstructured":"Qian, Y., et al.: Boosting diffusion models with moving average sampling in frequency domain. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00851"},{"key":"11_CR28","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: ICML (2021)"},{"key":"11_CR29","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: CVPR (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"11_CR30","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: convolutional networks for biomedical image segmentation. In: MICCAI (2015)","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"11_CR31","unstructured":"Sohl-Dickstein, J., Weiss, E., Maheswaranathan, N., Ganguli, S.: Deep unsupervised learning using nonequilibrium thermodynamics. In: ICML (2015)"},{"key":"11_CR32","unstructured":"Song, J., Meng, C., Ermon, S.: Denoising diffusion implicit models. In: ICLR (2021)"},{"key":"11_CR33","unstructured":"Song, Y., Ermon, S.: Generative modeling by estimating gradients of the data distribution. In: NeurIPS (2019)"},{"key":"11_CR34","doi-asserted-by":"crossref","unstructured":"Wang, B., Zheng, H., Liang, X., Chen, Y., Lin, L., Yang, M.: Toward characteristic-preserving image-based virtual try-on network. In: ECCV (2018)","DOI":"10.1007\/978-3-030-01261-8_36"},{"issue":"4","key":"11_CR35","first-page":"600","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. IEEE TIP 13(4), 600\u2013612 (2004)","journal-title":"IEEE TIP"},{"key":"11_CR36","doi-asserted-by":"crossref","unstructured":"Xie, Z., et al.: GP-VTON: towards general purpose virtual try-on via collaborative local-flow global-parsing learning. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02255"},{"key":"11_CR37","doi-asserted-by":"crossref","unstructured":"Yang, B., et al.: Paint by example: exemplar-based image editing with diffusion models. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01763"},{"key":"11_CR38","doi-asserted-by":"crossref","unstructured":"Yang, H., Zhang, R., Guo, X., Liu, W., Zuo, W., Luo, P.: Towards photo-realistic virtual try-on by adaptively generating-preserving image content. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00787"},{"key":"11_CR39","doi-asserted-by":"crossref","unstructured":"Yu, R., Wang, X., Xie, X.: VTNFP: an image-based virtual try-on network with body and clothing feature preservation. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.01061"},{"key":"11_CR40","doi-asserted-by":"crossref","unstructured":"Zhang, R., Isola, P., Efros, A.A., Shechtman, E., Wang, O.: The unreasonable effectiveness of deep features as a perceptual metric. In: CVPR (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"key":"11_CR41","doi-asserted-by":"crossref","unstructured":"Zhang, Z., et al.: Trip: temporal residual learning with image noise prior for image-to-video diffusion models. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00828"},{"key":"11_CR42","doi-asserted-by":"crossref","unstructured":"Zhu, L., et al.: Tryondiffusion: a tale of two unets. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00447"},{"key":"11_CR43","doi-asserted-by":"crossref","unstructured":"Zhu, R., et al.: SD-DIT: unleashing the power of self-supervised discrimination in diffusion transformer. In: CVPR (2024)","DOI":"10.1109\/CVPR52733.2024.00806"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72967-6_11","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:13:44Z","timestamp":1730574824000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72967-6_11"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,3]]},"ISBN":["9783031729669","9783031729676"],"references-count":43,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72967-6_11","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,3]]},"assertion":[{"value":"3 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}