{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,12]],"date-time":"2025-11-12T14:23:47Z","timestamp":1762957427936},"reference-count":29,"publisher":"Institute of Electronics, Information and Communications Engineers (IEICE)","issue":"4","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEICE Trans. Inf. &amp; Syst."],"published-print":{"date-parts":[[2024,4,1]]},"DOI":"10.1587\/transinf.2023ihp0010","type":"journal-article","created":{"date-parts":[[2024,3,31]],"date-time":"2024-03-31T22:20:24Z","timestamp":1711923624000},"page":"505-514","source":"Crossref","is-referenced-by-count":2,"title":["Multi-Style Shape Matching GAN for Text Images"],"prefix":"10.1587","volume":"E107.D","author":[{"given":"Honghui","family":"YUAN","sequence":"first","affiliation":[{"name":"Department of Informatics, The University of Electro-Communications"}]},{"given":"Keiji","family":"YANAI","sequence":"additional","affiliation":[{"name":"Department of Informatics, The University of Electro-Communications"}]}],"member":"532","reference":[{"key":"1","doi-asserted-by":"crossref","unstructured":"[1] S. Yang, Z. Wang, Z. Wang, N. Xu, J. Liu, and Z. Guo, \u201cControllable artistic text style transfer via shape-matching GAN,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.4442-4451, 2019.","DOI":"10.1109\/ICCV.2019.00454"},{"key":"2","doi-asserted-by":"crossref","unstructured":"[2] T. Park, M.-Y. Liu, T.-C. Wang, and J.-Y. Zhu, \u201cSemantic image synthesis with spatially-adaptive normalization,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.2337-2346, 2019.","DOI":"10.1109\/CVPR.2019.00244"},{"key":"3","doi-asserted-by":"crossref","unstructured":"[3] P. Zhu, R. Abdal, Y. Qin, and P. Wonka, \u201cSEAN: Image synthesis with semantic region-adaptive normalization,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.5104-5113, 2020.","DOI":"10.1109\/CVPR42600.2020.00515"},{"key":"4","doi-asserted-by":"crossref","unstructured":"[4] H. Yuan and K. Yanai, \u201cMulti-style transfer generative adversarial network for text images,\u201d Proc. IEEE International Conference on Multimedia Information Processing and Retrieval (MIPR), pp.63-69, IEEE, 2021. https:\/\/ieeexplore.ieee.org\/abstract\/document\/9565534.","DOI":"10.1109\/MIPR51284.2021.00017"},{"key":"5","doi-asserted-by":"crossref","unstructured":"[5] P. Isola, J.-Y. Zhu, T. Zhou, and A.A. Efros, \u201cImage-to-image translation with conditional adversarial networks,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.1125-1134, 2017.","DOI":"10.1109\/CVPR.2017.632"},{"key":"6","doi-asserted-by":"crossref","unstructured":"[6] J.-Y. Zhu, T. Park, P. Isola, and A.A. Efros, \u201cUnpaired image-to-image translation using cycle-consistent adversarial networks,\u201d Proc. IEEE International Conference on Computer Vision, pp.2223-2232, 2017.","DOI":"10.1109\/ICCV.2017.244"},{"key":"7","unstructured":"[7] J.Y. Zhu, R. Zhang, D. Pathak, T. Darrell, A.A. Efros, O. Wang, and E. Shechtman, \u201cToward multimodal image-to-image translation,\u201d Advances in Neural Information Processing Systems, pp.465-476, 2017."},{"key":"8","unstructured":"[8] M.Y. Liu, T. Breuel, and J. Kautz, \u201cUnsupervised image-to-image translation networks,\u201d Advances in Neural Information Processing Systems, pp.700-708, 2017."},{"key":"9","doi-asserted-by":"crossref","unstructured":"[9] X. Huang, M.Y. Liu, S. Belongie, and J. 
Kautz, \u201cMultimodal unsupervised image-to-image translation,\u201d Proc. European Conference on Computer Vision, pp.172-189, 2018.","DOI":"10.1007\/978-3-030-01219-9_11"},{"key":"10","doi-asserted-by":"crossref","unstructured":"[10] Y. Choi, M. Choi, M. Kim, J.-W. Ha, S. Kim, and J. Choo, \u201cStargan: Unified generative adversarial networks for multi-domain image-to-image translation,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.8789-8797, 2018.","DOI":"10.1109\/CVPR.2018.00916"},{"key":"11","unstructured":"[11] M. Mirza and S. Osindero, \u201cConditional generative adversarial nets,\u201d arXiv preprint arXiv:1411.1784, 2014."},{"key":"12","unstructured":"[12] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio, \u201cGenerative adversarial nets,\u201d Advances in Neural Information Processing Systems, pp.2672-2680, 2014."},{"key":"13","doi-asserted-by":"crossref","unstructured":"[13] L.A. Gatys, A.S. Ecker, and M. Bethge, \u201cImage style transfer using convolutional neural networks,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.2414-2423, 2016.","DOI":"10.1109\/CVPR.2016.265"},{"key":"14","doi-asserted-by":"crossref","unstructured":"[14] J. Johnson, A. Alahi, and L. Fei-Fei, \u201cPerceptual losses for real-time style transfer and super-resolution,\u201d Proc. European Conference on Computer Vision, pp.694-711, 2016.","DOI":"10.1007\/978-3-319-46475-6_43"},{"key":"15","doi-asserted-by":"crossref","unstructured":"[15] X. Huang and S. Belongie, \u201cArbitrary style transfer in real-time with adaptive instance normalization,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.1501-1510, 2017.","DOI":"10.1109\/ICCV.2017.167"},{"key":"16","unstructured":"[16] T. Park, J.Y. Zhu, O. Wang, J. Lu, E. Shechtman, A.A. Efros, and R. Zhang, \u201cSwapping autoencoder for deep image manipulation,\u201d arXiv preprint arXiv:2007.00653, 2020."},{"key":"17","doi-asserted-by":"crossref","unstructured":"[17] T. Karras, S. Laine, and T. Aila, \u201cA style-based generator architecture for generative adversarial networks,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.4401-4410, 2019.","DOI":"10.1109\/CVPR.2019.00453"},{"key":"18","doi-asserted-by":"crossref","unstructured":"[18] Y. Deng, F. Tang, X. Pan, W. Dong, C. Ma, and C. Xu, \u201cStyTr\u02c62: Unbiased image style transfer with transformers,\u201d arXiv preprint arXiv:2105.14576, 2021.","DOI":"10.1109\/CVPR52688.2022.01104"},{"key":"19","doi-asserted-by":"crossref","unstructured":"[19] X. Wu, Z. Hu, L. Sheng, and D. Xu, \u201cStyleformer: Real-time arbitrary style transfer via parametric style composition,\u201d Proc. IEEE\/CVF International Conference on Computer Vision, pp.14618-14627, 2021.","DOI":"10.1109\/ICCV48922.2021.01435"},{"key":"20","unstructured":"[20] A. Radford, J.W. Kim, C. Hallacy, A. Ramesh, G. Goh, S. Agarwal, G. Sastry, A. Askell, P. Mishkin, J. Clark, et al., \u201cLearning transferable visual models from natural language supervision,\u201d Proc. International Conference on Machine Learning, pp.8748-8763, 2021."},{"key":"21","doi-asserted-by":"crossref","unstructured":"[21] G. Kwon and J.C. Ye, \u201cClipstyler: Image style transfer with a single text condition,\u201d Proc. IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp.18062-18071, 2022.","DOI":"10.1109\/CVPR52688.2022.01753"},{"key":"22","doi-asserted-by":"crossref","unstructured":"[22] O. Patashnik, Z. Wu, E. Shechtman, D. Cohen-Or, and D. 
Lischinski, \u201cStyleClip: Text-driven manipulation of stylegan imagery,\u201d Proc. IEEE\/CVF International Conference on Computer Vision, pp.2085-2094, 2021.","DOI":"10.1109\/ICCV48922.2021.00209"},{"key":"23","doi-asserted-by":"publisher","unstructured":"[23] S. Yang, J. Liu, W. Wang, and Z. Guo, \u201cTet-gan: Text effects transfer via stylization and destylization,\u201d Proc. AAAI Conference on Artificial Intelligence, vol.33, no.01, pp.1238-1245, 2019. 10.1609\/aaai.v33i01.33011238","DOI":"10.1609\/aaai.v33i01.33011238"},{"key":"24","doi-asserted-by":"publisher","unstructured":"[24] W. Li, Y. He, Y. Qi, Z. Li, and Y. Tang, \u201cFET-GAN: Font and effect transfer via k-shot adaptive instance normalization.,\u201d Proc. AAAI Conference on Artificial Intelligence, vol.34, no.02, pp.1717-1724, 2020. 10.1609\/aaai.v34i02.5535","DOI":"10.1609\/aaai.v34i02.5535"},{"key":"25","doi-asserted-by":"publisher","unstructured":"[25] H. Hayashi, K. Abe, and S. Uchida, \u201cGlyphGAN: Style-consistent font generation based on generative adversarial networks,\u201d Knowledge-Based Systems, vol.186, p.104927, 2019. 10.1016\/j.knosys.2019.104927","DOI":"10.1016\/j.knosys.2019.104927"},{"key":"26","doi-asserted-by":"crossref","unstructured":"[26] Q. Yang, J. Huang, and W. Lin, \u201cSwaptext: Image based texts transfer in scenes,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.14700-14709, 2020.","DOI":"10.1109\/CVPR42600.2020.01471"},{"key":"27","doi-asserted-by":"crossref","unstructured":"[27] S. Azadi, M. Fisher, V. Kim, Z. Wang, E. Shechtman, and T. Darrell, \u201cMulti-content gan for few-shot font style transfer,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.7564-7573, 2018.","DOI":"10.1109\/CVPR.2018.00789"},{"key":"28","doi-asserted-by":"crossref","unstructured":"[28] W. Wang, J. Liu, S. Yang, and Z. Guo, \u201cTypography with Decor: Intelligent text style transfer,\u201d Proc. IEEE Computer Vision and Pattern Recognition, pp.5889-5897, 2019.","DOI":"10.1109\/CVPR.2019.00604"},{"key":"29","unstructured":"[29] Y. Song and Y. Zhang, \u201cCLIPFont: Text guided vector wordart generation,\u201d Proc. British Machine Vision Conference, 2022. 10.1007\/978-3-031-19815-1_18"}],"container-title":["IEICE Transactions on Information and Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transinf\/E107.D\/4\/E107.D_2023IHP0010\/_pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,4,6]],"date-time":"2024-04-06T04:27:14Z","timestamp":1712377634000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.jstage.jst.go.jp\/article\/transinf\/E107.D\/4\/E107.D_2023IHP0010\/_article"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,1]]},"references-count":29,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2024]]}},"URL":"https:\/\/doi.org\/10.1587\/transinf.2023ihp0010","relation":{},"ISSN":["0916-8532","1745-1361"],"issn-type":[{"value":"0916-8532","type":"print"},{"value":"1745-1361","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4,1]]},"article-number":"2023IHP0010"}}