{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,12]],"date-time":"2025-11-12T14:25:41Z","timestamp":1762957541717,"version":"3.40.3"},"publisher-location":"Cham","reference-count":44,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031784941"},{"type":"electronic","value":"9783031784958"}],"license":[{"start":{"date-parts":[[2024,12,4]],"date-time":"2024-12-04T00:00:00Z","timestamp":1733270400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,4]],"date-time":"2024-12-04T00:00:00Z","timestamp":1733270400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-78495-8_7","type":"book-chapter","created":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T09:47:58Z","timestamp":1733219278000},"page":"105-121","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Font Style Translation in\u00a0Scene Text Images with\u00a0CLIPstyler"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-4334-9363","authenticated-orcid":false,"given":"Honghui","family":"Yuan","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0431-183X","authenticated-orcid":false,"given":"Keiji","family":"Yanai","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,4]]},"reference":[{"key":"7_CR1","doi-asserted-by":"crossref","unstructured":"Atarsaikhan, G., Iwana, B.K., Uchida, S.: Contained neural style transfer for decorated logo generation. In: 2018 13th IAPR International Workshop on Document Analysis Systems (DAS), pp. 317\u2013322 (2018)","DOI":"10.1109\/DAS.2018.78"},{"key":"7_CR2","doi-asserted-by":"crossref","unstructured":"Azadi, S., Fisher, M., Kim, V.G., Wang, Z., Shechtman, E., Darrell, T.: Multi-content GAN for few-shot font style transfer. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 7564\u20137573 (2018)","DOI":"10.1109\/CVPR.2018.00789"},{"key":"7_CR3","unstructured":"Chen, H., et\u00a0al.: DiffUTE: universal text editing diffusion model. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"7_CR4","unstructured":"Chen, J., Huang, Y., Lv, T., Cui, L., Chen, Q., Wei, F.: TextDiffuser: diffusion models as text painters. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"7_CR5","doi-asserted-by":"crossref","unstructured":"Deng, Y., et al.: StyTr2: image style transfer with transformers. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 11326\u201311336 (2022)","DOI":"10.1109\/CVPR52688.2022.01104"},{"issue":"5","key":"7_CR6","first-page":"2567","volume":"44","author":"K Ding","year":"2020","unstructured":"Ding, K., Ma, K., Wang, S., Simoncelli, E.P.: Image quality assessment: unifying structure and texture similarity. IEEE Trans. Pattern Anal. Mach. Intell. 44(5), 2567\u20132581 (2020)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"4","key":"7_CR7","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3528223.3530164","volume":"41","author":"R Gal","year":"2022","unstructured":"Gal, R., Patashnik, O., Maron, H., Bermano, A.H., Chechik, G., Cohen-Or, D.: StyleGAN-nada: CLIP-guided domain adaptation of image generators. ACM Trans. Graph. (TOG) 41(4), 1\u201313 (2022)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"7_CR8","doi-asserted-by":"crossref","unstructured":"Gatys, L.A., Ecker, A.S., Bethge, M.: Image style transfer using convolutional neural networks. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 2414\u20132423 (2016)","DOI":"10.1109\/CVPR.2016.265"},{"key":"7_CR9","unstructured":"Goodfellow, I., et al.: Generative adversarial nets. In: Advances in Neural Information Processing Systems, vol. 27 (2014)"},{"key":"7_CR10","doi-asserted-by":"crossref","unstructured":"Hessel, J., Holtzman, A., Forbes, M., Bras, R.L., Choi, Y.: CLIPScore: a reference-free evaluation metric for image captioning. arXiv preprint arXiv:2104.08718 (2021)","DOI":"10.18653\/v1\/2021.emnlp-main.595"},{"key":"7_CR11","unstructured":"Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: GANs trained by a two time-scale update rule converge to a local nash equilibrium. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"7_CR12","doi-asserted-by":"crossref","unstructured":"Honghui, Y., Keiji, Y.: Multi-style shape matching GAN for text images. IEICE Trans. Inf. Syst. E107-D, 505\u2013514 (2024)","DOI":"10.1587\/transinf.2023IHP0010"},{"key":"7_CR13","doi-asserted-by":"crossref","unstructured":"Huang, X., Belongie, S.: Arbitrary style transfer in real-time with adaptive instance normalization. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 1501\u20131510 (2017)","DOI":"10.1109\/ICCV.2017.167"},{"issue":"4","key":"7_CR14","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3592123","volume":"42","author":"S Iluz","year":"2023","unstructured":"Iluz, S., Vinker, Y., Hertz, A., Berio, D., Cohen-Or, D., Shamir, A.: Word-as-image for semantic typography. ACM Trans. Graph. (TOG) 42(4), 1\u201311 (2023)","journal-title":"ACM Trans. Graph. (TOG)"},{"key":"7_CR15","doi-asserted-by":"crossref","unstructured":"Izumi, K., Yanai, K.: Zero-shot font style transfer with a differentiable renderer. In: Proceedings of the 4th ACM International Conference on Multimedia in Asia, pp.\u00a01\u20135 (2022)","DOI":"10.1145\/3551626.3564961"},{"key":"7_CR16","unstructured":"Ji, J., et al.: Improving diffusion models for scene text editing with dual encoders. arXiv preprint arXiv:2304.05568 (2023)"},{"key":"7_CR17","doi-asserted-by":"crossref","unstructured":"Kamra, C.G., Mastan, I.D., Gupta, D.: Sem-CS: semantic CLIPStyler for text-based image style transfer. In: IEEE International Conference on Image Processing (ICIP), pp. 395\u2013399 (2023)","DOI":"10.1109\/ICIP49359.2023.10223148"},{"key":"7_CR18","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aila, T.: A style-based generator architecture for generative adversarial networks. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 4401\u20134410 (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"7_CR19","doi-asserted-by":"crossref","unstructured":"Krishnan, P., Kovvuri, R., Pang, G., Vassilev, B., Hassner, T.: TextStyleBrush: transfer of text aesthetics from a single example. IEEE Trans. Pattern Anal. Mach. Intell. (2023)","DOI":"10.1109\/TPAMI.2023.3239736"},{"key":"7_CR20","doi-asserted-by":"crossref","unstructured":"Kwon, G., Ye, J.C.: CLIPStyler: image style transfer with a single text condition. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 18062\u201318071 (2022)","DOI":"10.1109\/CVPR52688.2022.01753"},{"key":"7_CR21","doi-asserted-by":"crossref","unstructured":"Li, W., He, Y., Qi, Y., Li, Z., Tang, Y.: Fet-GAN: font and effect transfer via k-shot adaptive instance normalization. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a034, pp. 1717\u20131724 (2020)","DOI":"10.1609\/aaai.v34i02.5535"},{"key":"7_CR22","doi-asserted-by":"crossref","unstructured":"Luo, C., Jin, L., Chen, J.: SimAN: exploring self-supervised representation learning of scene text via similarity-aware normalization. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 1039\u20131048 (2022)","DOI":"10.1109\/CVPR52688.2022.00111"},{"key":"7_CR23","unstructured":"Ma, J., et al.: GlyphDraw: learning to draw Chinese characters in image synthesis models coherently. arXiv preprint arXiv:2303.17870 (2023)"},{"key":"7_CR24","doi-asserted-by":"crossref","unstructured":"Qu, Y., Tan, Q., Xie, H., Xu, J., Wang, Y., Zhang, Y.: Exploring stroke-level modifications for scene text editing. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a037, pp. 2119\u20132127 (2023)","DOI":"10.1609\/aaai.v37i2.25305"},{"key":"7_CR25","unstructured":"Radford, A., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763 (2021)"},{"key":"7_CR26","doi-asserted-by":"crossref","unstructured":"Rombach, R., Blattmann, A., Lorenz, D., Esser, P., Ommer, B.: High-resolution image synthesis with latent diffusion models. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 10684\u201310695 (2022)","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"7_CR27","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: convolutional networks for biomedical image segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI), pp. 234\u2013241 (2015)","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"7_CR28","doi-asserted-by":"crossref","unstructured":"Roy, P., Bhattacharya, S., Ghosh, S., Pal, U.: STEFANN: scene text editor using font adaptive neural network. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 13228\u201313237 (2020)","DOI":"10.1109\/CVPR42600.2020.01324"},{"key":"7_CR29","unstructured":"Song, Y., Zhang, Y.: CLIPFont: text guided vector wordart generation. In: British Machine Vision Conference. BMVA Press (2022). https:\/\/bmvc2022.mpi-inf.mpg.de\/0543.pdf"},{"issue":"8","key":"7_CR30","doi-asserted-by":"publisher","first-page":"3998","DOI":"10.1109\/TIP.2018.2831899","volume":"27","author":"H Talebi","year":"2018","unstructured":"Talebi, H., Milanfar, P.: NIMA: neural image assessment. IEEE Trans. Image Process. 27(8), 3998\u20134011 (2018)","journal-title":"IEEE Trans. Image Process."},{"key":"7_CR31","doi-asserted-by":"crossref","unstructured":"Tanveer, M., Wang, Y., Mahdavi-Amiri, A., Zhang, H.: DS-fusion: artistic typography via discriminated and stylized diffusion. In: Proceedings of IEEE International Conference on Computer Vision, pp. 374\u2013384 (2023)","DOI":"10.1109\/ICCV51070.2023.00041"},{"key":"7_CR32","unstructured":"Vaswani, A., et al.: Attention is all you need. In: Advances in Neural Information Processing Systems, vol. 30 (2017)"},{"key":"7_CR33","unstructured":"Veit, A., Matera, T., Neumann, L., Matas, J., Belongie, S.: Coco-text: dataset and benchmark for text detection and recognition in natural images. arXiv preprint arXiv:1601.07140 (2016)"},{"key":"7_CR34","doi-asserted-by":"crossref","unstructured":"Wang, C., Zhou, M., Ge, T., Jiang, Y., Bao, H., Xu, W.: CF-Font: content fusion for few-shot font generation. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 1858\u20131867 (2023)","DOI":"10.1109\/CVPR52729.2023.00185"},{"key":"7_CR35","doi-asserted-by":"crossref","unstructured":"Wang, W., Liu, J., Yang, S., Guo, Z.: Typography with decor: intelligent text style transfer. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 5889\u20135897 (2019)","DOI":"10.1109\/CVPR.2019.00604"},{"key":"7_CR36","doi-asserted-by":"crossref","unstructured":"Wu, L., et al.: Editing text in the wild. In: Proceedings of ACM International Conference Multimedia, pp. 1500\u20131508 (2019)","DOI":"10.1145\/3343031.3350929"},{"key":"7_CR37","doi-asserted-by":"crossref","unstructured":"Xie, Y., Chen, X., Sun, L., Lu, Y.: DG-Font: deformable generative networks for unsupervised font generation. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 5130\u20135140 (2021)","DOI":"10.1109\/CVPR46437.2021.00509"},{"key":"7_CR38","doi-asserted-by":"crossref","unstructured":"Xu, W., Long, C., Wang, R., Wang, G.: DRB-GAN: a dynamic resblock generative adversarial network for artistic style transfer. In: Proceedings of IEEE International Conference on Computer Vision, pp. 6383\u20136392 (2021)","DOI":"10.1109\/ICCV48922.2021.00632"},{"key":"7_CR39","doi-asserted-by":"crossref","unstructured":"Yang, Q., Huang, J., Lin, W.: SwapText: image based texts transfer in scenes. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 14700\u201314709 (2020)","DOI":"10.1109\/CVPR42600.2020.01471"},{"key":"7_CR40","doi-asserted-by":"crossref","unstructured":"Yang, S., Liu, J., Wang, W., Guo, Z.: TET-GAN: text effects transfer via stylization and destylization. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a033, pp. 1238\u20131245 (2019)","DOI":"10.1609\/aaai.v33i01.33011238"},{"key":"7_CR41","doi-asserted-by":"crossref","unstructured":"Yang, S., Wang, Z., Wang, Z., Xu, N., Liu, J., Guo, Z.: Controllable artistic text style transfer via shape-matching GAN. In: Proceedings of IEEE Computer Vision and Pattern Recognition, pp. 4442\u20134451 (2019)","DOI":"10.1109\/ICCV.2019.00454"},{"key":"7_CR42","unstructured":"Yang, Y., et al.: GlyphControl: glyph conditional control for visual text generation. In: Advances in Neural Information Processing Systems, vol. 36 (2024)"},{"key":"7_CR43","unstructured":"Yang, Z., Song, H., Wu, Q.: Generative artisan: a semantic-aware and controllable clipstyler. arXiv preprint arXiv:2207.11598 (2022)"},{"key":"7_CR44","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: Proceedings of IEEE International Conference on Computer Vision, pp. 3836\u20133847 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-78495-8_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,3]],"date-time":"2024-12-03T10:25:20Z","timestamp":1733221520000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-78495-8_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,4]]},"ISBN":["9783031784941","9783031784958"],"references-count":44,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-78495-8_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2024,12,4]]},"assertion":[{"value":"4 December 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICPR","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Pattern Recognition","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Kolkata","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"India","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1 December 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5 December 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icpr2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/icpr2024.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}