{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,18]],"date-time":"2026-01-18T07:21:44Z","timestamp":1768720904853,"version":"3.49.0"},"reference-count":36,"publisher":"Springer Science and Business Media LLC","issue":"20","license":[{"start":{"date-parts":[[2022,6,9]],"date-time":"2022-06-09T00:00:00Z","timestamp":1654732800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,6,9]],"date-time":"2022-06-09T00:00:00Z","timestamp":1654732800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62006191"],"award-info":[{"award-number":["62006191"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key RD Program of Shaanxi","award":["2021ZDLGY15-04"],"award-info":[{"award-number":["2021ZDLGY15-04"]}]},{"name":"Changjiang Scholars and Innovative Research Team in University","award":["IRT-17R87"],"award-info":[{"award-number":["IRT-17R87"]}]},{"name":"Xi\u2019an Key Laboratory of Intelligent Perception and Cultural Inheritance","award":["2019219614SYS011CG033"],"award-info":[{"award-number":["2019219614SYS011CG033"]}]},{"name":"Shaanxi Provincial Department of Education Special Scientific Research Project","award":["20JK0940"],"award-info":[{"award-number":["20JK0940"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Neural Comput &amp; 
Applic"],"published-print":{"date-parts":[[2022,10]]},"DOI":"10.1007\/s00521-022-07432-w","type":"journal-article","created":{"date-parts":[[2022,6,10]],"date-time":"2022-06-10T00:02:33Z","timestamp":1654819353000},"page":"18075-18096","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":25,"title":["Contour-enhanced CycleGAN framework for style transfer from scenery photos to Chinese landscape paintings"],"prefix":"10.1007","volume":"34","author":[{"given":"Xianlin","family":"Peng","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1813-4050","authenticated-orcid":false,"given":"Shenglin","family":"Peng","sequence":"additional","affiliation":[]},{"given":"Qiyao","family":"Hu","sequence":"additional","affiliation":[]},{"given":"Jinye","family":"Peng","sequence":"additional","affiliation":[]},{"given":"Jiaxin","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Xinyu","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Jianping","family":"Fan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,6,9]]},"reference":[{"key":"7432_CR1","first-page":"520","volume":"10","author":"SW Huang","year":"2003","unstructured":"Huang SW, Way DL, Shih ZC (2003) Physical-based model of ink diffusion in Chinese ink paintings. J World Soc Comput Graph 10:520","journal-title":"J World Soc Comput Graph"},{"key":"7432_CR2","doi-asserted-by":"publisher","first-page":"1307","DOI":"10.1631\/FITEE.1900195","volume":"20","author":"L Huang","year":"2019","unstructured":"Huang L, Hou Z, Zhao Y, Zhang D (2019) Research progress on and prospects for virtual brush modeling in digital calligraphy and painting. 
Front Inf Technol Electron Eng 20:1307","journal-title":"Front Inf Technol Electron Eng"},{"key":"7432_CR3","doi-asserted-by":"crossref","unstructured":"Li XX, Li Y (2006) Simulation of Chinese ink-wash painting based on landscapes and trees. In: Fourcaud T, Zhang XP (eds) 2006 Second international symposium on plant growth modeling and applications, vol 328. IEEE, Los Alamitos","DOI":"10.1109\/PMA.2006.40"},{"key":"7432_CR4","unstructured":"Chen TD, Yu CH (2009) Hairy brush model interactive simulation in Chinese ink painting style. In: The 2009 international symposium on information processing (ISIP), vol 184. Citeseer"},{"key":"7432_CR5","doi-asserted-by":"crossref","unstructured":"Chen T (2009) Non-photorealistic rendering of ink painting style diffusion. In: Lin TY, Hu XH, Xia JL, Hong TP, Shi ZZ, Han JC, Tsumoto S, Shen ZJ (eds) 2009 IEEE international conference on granular computing, vol 78. IEEE, New York","DOI":"10.1109\/GRC.2009.5255155"},{"key":"7432_CR6","first-page":"2672","volume":"27","author":"IJ Goodfellow","year":"2014","unstructured":"Goodfellow IJ, Pouget-Abadie J, Mirza M, Xu B, Warde-Farley D, Ozair S, Courville A, Bengio Y (2014) Generative adversarial. Networks 27:2672","journal-title":"Networks"},{"key":"7432_CR7","doi-asserted-by":"crossref","unstructured":"Zheng Z, Yang X, Yu Z, Zheng L, Yang Y, Kautz J (2019) Joint discriminative and generative learning for person re-identification. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, vol 2138","DOI":"10.1109\/CVPR.2019.00224"},{"key":"7432_CR8","unstructured":"Reed S, Akata Z, Yan X, Logeswaran L, Schiele B, Lee H (2016) Generative adversarial text to image synthesis. In: International conference on machine learning, vol 1060. PMLR"},{"key":"7432_CR9","doi-asserted-by":"crossref","unstructured":"Zhu J, Park T, Isola P, Efros AA (2017) Unpaired image-to-image translation using cycle-consistent adversarial networks. 
In: Proceedings of the IEEE international conference on computer vision, vol 2223","DOI":"10.1109\/ICCV.2017.244"},{"key":"7432_CR10","unstructured":"Radford A, Metz L, Chintala S (2015) Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434"},{"key":"7432_CR11","unstructured":"Brock A, Donahue J, Simonyan K (2018) Large scale GAN training for high fidelity natural image synthesis. arXiv preprint arXiv:1809.11096"},{"key":"7432_CR12","doi-asserted-by":"crossref","unstructured":"Karras T, Laine S, Aila T (2019) A style-based generator architecture for generative adversarial networks. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, vol 4401","DOI":"10.1109\/CVPR.2019.00453"},{"key":"7432_CR13","doi-asserted-by":"crossref","unstructured":"Zhang H, Xu T, Li H, Zhang S, Wang X, Huang X, Metaxas D (2017) StackGAN: text to photo-realistic image synthesis with stacked generative adversarial networks. In: Proceedings of the IEEE international conference on computer vision, vol 5907","DOI":"10.1109\/ICCV.2017.629"},{"key":"7432_CR14","doi-asserted-by":"crossref","unstructured":"Antipov G, Baccouche M, Dugelay JL (2017) Face aging with conditional generative adversarial networks. In: 2017 IEEE international conference on image processing (ICIP), vol 2089. IEEE","DOI":"10.1109\/ICIP.2017.8296650"},{"key":"7432_CR15","doi-asserted-by":"crossref","unstructured":"Choi Y, Choi M, Kim M, Ha JW, Choo J (2018) StarGAN: unified generative adversarial networks for multi-domain image-to-image translation. In: Proceedings of the IEEE conference on computer vision and pattern recognition, vol 8789","DOI":"10.1109\/CVPR.2018.00916"},{"key":"7432_CR16","unstructured":"Kim J, Kim M, Kang H, Lee K (2019) U-GAT-IT: unsupervised generative attentional networks with adaptive layer-instance normalization for image-to-image translation. 
arXiv preprint arXiv:1907.10830"},{"key":"7432_CR17","doi-asserted-by":"crossref","unstructured":"Huang X, Liu M, Belongie S, Kautz J (2018) Multimodal unsupervised image-to-image translation. In: Proceedings of the European conference on computer vision (ECCV), vol 172","DOI":"10.1007\/978-3-030-01219-9_11"},{"key":"7432_CR18","doi-asserted-by":"crossref","unstructured":"Chen R, Huang W, Huang B, Sun F, Fang B (2020) Reusing discriminators for encoding towards unsupervised image-to-image translation. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, vol 8168","DOI":"10.1109\/CVPR42600.2020.00819"},{"key":"7432_CR19","unstructured":"Chen M (2018) The modern meaning of traditional Cun method. Fine Arts 1:58-61"},{"key":"7432_CR20","doi-asserted-by":"publisher","first-page":"3998","DOI":"10.1109\/TIP.2018.2831899","volume":"27","author":"H Talebi","year":"2018","unstructured":"Talebi H, Milanfar P (2018) NIMA: neural image assessment. IEEE Trans Image Process 27:3998","journal-title":"IEEE Trans Image Process"},{"key":"7432_CR21","doi-asserted-by":"crossref","unstructured":"Isola P, Zhu JY, Zhou T, Efros AA (2017) Image-to-image translation with conditional adversarial networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, vol 1125","DOI":"10.1109\/CVPR.2017.632"},{"key":"7432_CR22","doi-asserted-by":"publisher","first-page":"132002","DOI":"10.1109\/ACCESS.2020.3009470","volume":"8","author":"F Zhang","year":"2020","unstructured":"Zhang F, Gao H, Lai Y (2020) Detail-preserving CycleGAN-AdaIN framework for image-to-ink painting translation. IEEE Access 8:132002","journal-title":"IEEE Access"},{"key":"7432_CR23","doi-asserted-by":"publisher","first-page":"37","DOI":"10.1016\/0262-8856(83)90006-9","volume":"1","author":"J Kittler","year":"1983","unstructured":"Kittler J (1983) On the accuracy of the Sobel edge detector. 
Image Vis Comput 1:37","journal-title":"Image Vis Comput"},{"key":"7432_CR24","doi-asserted-by":"publisher","first-page":"49741","DOI":"10.1109\/ACCESS.2020.2980060","volume":"8","author":"A Chen","year":"2020","unstructured":"Chen A, Xing H, Wang F (2020) A facial expression recognition method using deep convolutional neural networks based on edge computing. IEEE Access 8:49741","journal-title":"IEEE Access"},{"key":"7432_CR25","doi-asserted-by":"publisher","first-page":"6779","DOI":"10.1007\/s00521-019-04358-8","volume":"32","author":"S Karatsiolis","year":"2020","unstructured":"Karatsiolis S, Christos S (2020) Modular domain-to-domain translation network. Neural Comput Appl 32:6779","journal-title":"Neural Comput Appl"},{"key":"7432_CR26","unstructured":"Barratt S, Sharma R (2018) A note on the inception score. arXiv preprint arXiv:1801.01973"},{"key":"7432_CR27","doi-asserted-by":"publisher","first-page":"450","DOI":"10.1016\/0047-259X(82)90077-X","volume":"12","author":"DC Dowson","year":"1982","unstructured":"Dowson DC, Landau BV (1982) The Fr\u00e9chet distance between multivariate normal distributions. J Multivar Anal 12:450","journal-title":"J Multivar Anal"},{"key":"7432_CR28","unstructured":"Bi\u0144kowski M, Sutherland DJ, Arbel M, Gretton A (2018) Demystifying MMD GANs. arXiv preprint arXiv:1801.01401"},{"key":"7432_CR29","doi-asserted-by":"crossref","unstructured":"Hung SK, Gan JQ (2021) Facial image augmentation from sparse line features using small training data. In: International work-conference on artificial neural networks, vol 547. Springer","DOI":"10.1007\/978-3-030-85030-2_45"},{"key":"7432_CR30","doi-asserted-by":"crossref","unstructured":"Shmelkov K, Schmid C, Alahari K (2018) How good is my GAN?. 
In: Proceedings of the European conference on computer vision (ECCV), vol 213","DOI":"10.1007\/978-3-030-01216-8_14"},{"key":"7432_CR31","unstructured":"Devries T, Romero A, Pineda L, Taylor GW, Drozdzal M (2019) On the evaluation of conditional GANs. arXiv preprint arXiv:1907.08175"},{"key":"7432_CR32","doi-asserted-by":"publisher","first-page":"600","DOI":"10.1109\/TIP.2003.819861","volume":"13","author":"W Zhou","year":"2004","unstructured":"Zhou W, Bovik AC, Sheikh HR, Simoncelli EP (2004) Image quality assessment: from error visibility to structural similarity. IEEE Trans Image Process 13:600","journal-title":"IEEE Trans Image Process"},{"key":"7432_CR33","doi-asserted-by":"publisher","first-page":"12499","DOI":"10.1007\/s00521-020-04708-x","volume":"32","author":"P Devan","year":"2020","unstructured":"Devan P, Khare N (2020) An efficient XGBoost\u2013DNN-based classification model for network intrusion detection system. Neural Comput Appl 32:12499\u201312514","journal-title":"Neural Comput Appl"},{"key":"7432_CR34","unstructured":"Ravuri S, Vinyals O (2019) Seeing is not necessarily believing: limitations of biggans for data augmentation. In: ICLR workshop on international conference on learning representations"},{"key":"7432_CR35","first-page":"47","volume":"25","author":"N He","year":"2017","unstructured":"He N, Xie K, Li T, Ye Y (2017) Overview of image quality assessment. J Beijing Inst Graph Commun 25:47","journal-title":"J Beijing Inst Graph Commun"},{"key":"7432_CR36","doi-asserted-by":"publisher","first-page":"31","DOI":"10.1016\/j.dsp.2018.12.004","volume":"91","author":"J Wu","year":"2018","unstructured":"Wu J, Xia Z, Zhang H, Li H (2018) Blind quality assessment for screen content images by combining local and global features. 
Digit Signal Process 91:31","journal-title":"Digit Signal Process"}],"container-title":["Neural Computing and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00521-022-07432-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00521-022-07432-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00521-022-07432-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,9,23]],"date-time":"2022-09-23T15:38:54Z","timestamp":1663947534000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00521-022-07432-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,6,9]]},"references-count":36,"journal-issue":{"issue":"20","published-print":{"date-parts":[[2022,10]]}},"alternative-id":["7432"],"URL":"https:\/\/doi.org\/10.1007\/s00521-022-07432-w","relation":{},"ISSN":["0941-0643","1433-3058"],"issn-type":[{"value":"0941-0643","type":"print"},{"value":"1433-3058","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,6,9]]},"assertion":[{"value":"17 September 2021","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 May 2022","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 June 2022","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Xianlin Peng, Shenglin Peng, Qiyao Hu, Jinye Peng, Jiaxin Wang, Xinyu Liu, and Jianping Fan declare that they have no 
conflict of interest","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}