{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,25]],"date-time":"2025-09-25T15:02:04Z","timestamp":1758812524267,"version":"3.37.3"},"reference-count":39,"publisher":"Springer Science and Business Media LLC","issue":"8","license":[{"start":{"date-parts":[[2023,6,24]],"date-time":"2023-06-24T00:00:00Z","timestamp":1687564800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,6,24]],"date-time":"2023-06-24T00:00:00Z","timestamp":1687564800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["no. 61832016","no. U20B2070"],"award-info":[{"award-number":["no. 61832016","no. U20B2070"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012166","name":"National Key R &D Program of China","doi-asserted-by":"crossref","award":["no. 2020AAA0106200"],"award-info":[{"award-number":["no. 2020AAA0106200"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100020980","name":"Beijing Innovation Center for Engineering Science and Advanced Technology, Peking University","doi-asserted-by":"publisher","award":["no. L221013"],"award-info":[{"award-number":["no. 
L221013"]}],"id":[{"id":"10.13039\/501100020980","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2023,8]]},"DOI":"10.1007\/s00371-023-02956-1","type":"journal-article","created":{"date-parts":[[2023,6,24]],"date-time":"2023-06-24T13:06:09Z","timestamp":1687611969000},"page":"3507-3518","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":8,"title":["PLDGAN: portrait line drawing generation with prior knowledge and conditioning target"],"prefix":"10.1007","volume":"39","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-9331-4593","authenticated-orcid":false,"given":"Sifei","family":"Li","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3843-7406","authenticated-orcid":false,"given":"Fuzhang","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Yuqing","family":"Fan","sequence":"additional","affiliation":[]},{"given":"Xue","family":"Song","sequence":"additional","affiliation":[]},{"given":"Weiming","family":"Dong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,6,24]]},"reference":[{"key":"2956_CR1","doi-asserted-by":"crossref","unstructured":"Bhunia, A.K., Khan, S., Cholakkal, H., Anwer, R.M., Khan, F.S., Laaksonen, J., Felsberg, M.: Doodleformer: Creative sketch drawing with transformers. In: European Conference on Computer Vision (ECCV), pp. 338\u2013355. Springer (2022)","DOI":"10.1007\/978-3-031-19790-1_21"},{"key":"2956_CR2","unstructured":"Canny, J.F.: Finding Edges and Lines in Images. Tech. rep., MASSACHUSETTS INST OF TECH CAMBRIDGE ARTIFICIAL INTELLIGENCE LAB (1983)"},{"key":"2956_CR3","doi-asserted-by":"crossref","unstructured":"Cao, Z., Simon, T., Wei, S.E., Sheikh, Y.: Realtime multi-person 2D pose estimation using part affinity fields. 
In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7291\u20137299 (2017)","DOI":"10.1109\/CVPR.2017.143"},{"key":"2956_CR4","doi-asserted-by":"crossref","unstructured":"Chan, C., Durand, F., Isola, P.: Learning to generate line drawings that convey geometry and semantics. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7905\u20137915 (2022)","DOI":"10.1109\/CVPR52688.2022.00776"},{"key":"2956_CR5","doi-asserted-by":"crossref","unstructured":"Chen, Y.J., Cheng, S.I., Chiu, W.C., Tseng, H.Y., Lee, H.Y.: Vector quantized image-to-image translation. In: European Conference on Computer Vision (ECCV), pp. 440\u2013456. Springer (2022)","DOI":"10.1007\/978-3-031-19787-1_25"},{"issue":"7","key":"2956_CR6","doi-asserted-by":"publisher","first-page":"1160","DOI":"10.1364\/JOSAA.2.001160","volume":"2","author":"JG Daugman","year":"1985","unstructured":"Daugman, J.G.: Uncertainty relation for resolution in space, spatial frequency, and orientation optimized by two-dimensional visual cortical filters. JOSA A 2(7), 1160\u20131169 (1985)","journal-title":"JOSA A"},{"key":"2956_CR7","doi-asserted-by":"crossref","unstructured":"Deng, Y., Tang, F., Dong, W., Huang, H., Ma, C., Xu, C.: Arbitrary video style transfer via multi-channel correlation. In: AAAI Conference on Artificial Intelligence (AAAI), pp. 1210\u20131217 (2021)","DOI":"10.1609\/aaai.v35i2.16208"},{"key":"2956_CR8","doi-asserted-by":"crossref","unstructured":"Deng, Y., Tang, F., Dong, W., Ma, C., Pan, X., Wang, L., Xu, C.: StyTr$$^2$$: Image style transfer with transformers. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 
11,326\u201311,336 (2022)","DOI":"10.1109\/CVPR52688.2022.01104"},{"issue":"8","key":"2956_CR9","doi-asserted-by":"publisher","first-page":"1558","DOI":"10.1109\/TPAMI.2014.2377715","volume":"37","author":"P Doll\u00e1r","year":"2014","unstructured":"Doll\u00e1r, P., Zitnick, C.L.: Fast edge detection using structured forests. IEEE Trans. Pattern Anal. Mach. Intell. 37(8), 1558\u20131570 (2014)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"2956_CR10","unstructured":"Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., Bengio, Y.: Generative adversarial nets. Adv. Neural Inf. Process. Syst. 27 (2014)"},{"key":"2956_CR11","doi-asserted-by":"publisher","first-page":"1435","DOI":"10.1109\/TMM.2021.3065230","volume":"24","author":"J Huang","year":"2021","unstructured":"Huang, J., Liao, J., Kwong, S.: Unsupervised image-to-image translation via pre-trained StyleGAN2 network. IEEE Trans. Multimedia 24, 1435\u20131448 (2021)","journal-title":"IEEE Trans. Multimedia"},{"key":"2956_CR12","doi-asserted-by":"crossref","unstructured":"Isola, P., Zhu, J.Y., Zhou, T., Efros, A.A.: Image-to-image translation with conditional adversarial networks. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1125\u20131134 (2017)","DOI":"10.1109\/CVPR.2017.632"},{"key":"2956_CR13","doi-asserted-by":"crossref","unstructured":"Karras, T., Laine, S., Aittala, M., Hellsten, J., Lehtinen, J., Aila, T.: Analyzing and improving the image quality of StyleGAN. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 8110\u20138119 (2020)","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"2956_CR14","unstructured":"Kingma, D.P., Ba, J.: Adam: A Method for Stochastic Optimization. 
In: International Conference on Learning Representations (ICLR) (2015)"},{"key":"2956_CR15","doi-asserted-by":"crossref","unstructured":"Li, M., Lin, Z., Mech, R., Yumer, E., Ramanan, D.: Photo-sketching: Inferring contour drawings from images. In: IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 1403\u20131412. IEEE (2019)","DOI":"10.1109\/WACV.2019.00154"},{"key":"2956_CR16","doi-asserted-by":"crossref","unstructured":"Liu, F., Deng, X., Lai, Y.K., Liu, Y.J., Ma, C., Wang, H.: SketchGAN: Joint sketch completion and recognition with generative adversarial network. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5830\u20135839 (2019)","DOI":"10.1109\/CVPR.2019.00598"},{"key":"2956_CR17","doi-asserted-by":"crossref","unstructured":"Mairal, J., Leordeanu, M., Bach, F., Hebert, M., Ponce, J.: Discriminative sparse image models for class-specific edge detection and image interpretation. In: European Conference on Computer Vision (ECCV), pp. 43\u201356. Springer (2008)","DOI":"10.1007\/978-3-540-88690-7_4"},{"key":"2956_CR18","doi-asserted-by":"crossref","unstructured":"Park, T., Liu, M.Y., Wang, T.C., Zhu, J.Y.: Semantic image synthesis with spatially-adaptive normalization. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2337\u20132346 (2019)","DOI":"10.1109\/CVPR.2019.00244"},{"key":"2956_CR19","unstructured":"Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et\u00a0al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning (ICML), pp. 8748\u20138763. PMLR (2021)"},{"key":"2956_CR20","unstructured":"Ribeiro, L.S.F., Bui, T., Collomosse, J., Ponti, M.: Sketchformer: Transformer-based representation for sketched structure. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 
14,153\u201314,162 (2020)"},{"key":"2956_CR21","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: Convolutional networks for biomedical image segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 234\u2013241. Springer (2015)","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"2956_CR22","doi-asserted-by":"crossref","unstructured":"Saharia, C., Chan, W., Saxena, S., Li, L., Whang, J., Denton, E., Ghasemipour, S.K.S., Ayan, B.K., Mahdavi, S.S., Lopes, R.G., et\u00a0al.: Photorealistic text-to-image diffusion models with deep language understanding. In: Thirty-sixth Conference on Neural Information Processing Systems (2022)","DOI":"10.1145\/3528233.3530757"},{"key":"2956_CR23","unstructured":"Simonyan, K., Zisserman, A.: Very Deep Convolutional Networks for Large-Scale Image Recognition. In: International Conference on Learning Representations (ICLR) (2015)"},{"key":"2956_CR24","doi-asserted-by":"publisher","first-page":"109,461","DOI":"10.1016\/j.patcog.2023.109461","volume":"139","author":"X Soria","year":"2023","unstructured":"Soria, X., Sappa, A., Humanante, P., Akbarinia, A.: Dense extreme inception network for edge detection. Pattern Recogn. 139, 109,461 (2023)","journal-title":"Pattern Recogn."},{"key":"2956_CR25","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2818\u20132826 (2016)","DOI":"10.1109\/CVPR.2016.308"},{"key":"2956_CR26","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, \u0141., Polosukhin, I.: Attention is all you need. 
In: Advances in Neural Information Processing Systems (NIPS) (2017)"},{"issue":"4","key":"2956_CR27","doi-asserted-by":"publisher","first-page":"86:1","DOI":"10.1145\/3528223.3530068","volume":"41","author":"Y Vinker","year":"2022","unstructured":"Vinker, Y., Pajouheshgar, E., Bo, J.Y., Bachmann, R.C., Bermano, A.H., Cohen-Or, D., Zamir, A., Shamir, A.: CLIPasso: Semantically-aware object sketching. ACM Trans. Graph. 41(4), 86:1-86:11 (2022)","journal-title":"ACM Trans. Graph."},{"key":"2956_CR28","doi-asserted-by":"crossref","unstructured":"Wang, T.C., Liu, M.Y., Zhu, J.Y., Tao, A., Kautz, J., Catanzaro, B.: High-resolution image synthesis and semantic manipulation with conditional GANs. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 8798\u20138807 (2018)","DOI":"10.1109\/CVPR.2018.00917"},{"key":"2956_CR29","unstructured":"Xiaofeng, R., Bo, L.: Discriminatively trained sparse code gradients for contour detection. In: Advances in Neural Information Processing Systems (NIPS) (2012)"},{"key":"2956_CR30","doi-asserted-by":"crossref","unstructured":"Xie, S., Tu, Z.: Holistically-nested edge detection. In: IEEE International Conference on Computer Vision (ICCV), pp. 1395\u20131403 (2015)","DOI":"10.1109\/ICCV.2015.164"},{"issue":"1","key":"2956_CR31","doi-asserted-by":"publisher","first-page":"905","DOI":"10.1109\/TPAMI.2022.3147570","volume":"45","author":"R Yi","year":"2023","unstructured":"Yi, R., Liu, Y.J., Lai, Y.K., Rosin, P.: Quality metric guided portrait line drawing generation from unpaired training data. IEEE Trans. Pattern Anal. Mach. Intell. 45(1), 905\u2013918 (2023)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"issue":"9","key":"2956_CR32","doi-asserted-by":"publisher","first-page":"4350","DOI":"10.1109\/TCYB.2020.2972944","volume":"51","author":"J Yu","year":"2020","unstructured":"Yu, J., Xu, X., Gao, F., Shi, S., Wang, M., Tao, D., Huang, Q.: Toward realistic face photo-sketch synthesis via composition-aided GANs. IEEE Trans. Cybern. 51(9), 4350\u20134362 (2020)","journal-title":"IEEE Trans. Cybern."},{"key":"2956_CR33","doi-asserted-by":"crossref","unstructured":"Zhan, F., Yu, Y., Wu, R., Zhang, J., Cui, K., Xiao, A., Lu, S., Miao, C.: Bi-level feature alignment for versatile image translation and manipulation. In: European Conference on Computer Vision (ECCV), pp. 224\u2013241. Springer (2022)","DOI":"10.1007\/978-3-031-19787-1_13"},{"issue":"3","key":"2956_CR34","doi-asserted-by":"publisher","first-page":"3144","DOI":"10.1109\/TII.2022.3160705","volume":"19","author":"X Zhang","year":"2023","unstructured":"Zhang, X., Fan, C., Xiao, Z., Zhao, L., Chen, H., Chang, X.: Random reconstructed unpaired image-to-image translation. IEEE Trans. Industr. Inf. 19(3), 3144\u20133154 (2023)","journal-title":"IEEE Trans. Industr. Inf."},{"key":"2956_CR35","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Huang, N., Tang, F., Huang, H., Ma, C., Dong, W., Xu, C.: Inversion-based style transfer with diffusion models. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) (2023)","DOI":"10.1109\/CVPR52729.2023.00978"},{"key":"2956_CR36","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Tang, F., Dong, W., Huang, H., Ma, C., Lee, T.Y., Xu, C.: Domain enhanced arbitrary image style transfer via contrastive learning. In: ACM SIGGRAPH 2022 Conference Proceedings, pp. 12:1\u201312:8. 
Association for Computing Machinery, New York, NY, USA (2022)","DOI":"10.1145\/3528233.3530736"},{"key":"2956_CR37","doi-asserted-by":"crossref","unstructured":"Zhou, X., Zhang, B., Zhang, T., Zhang, P., Bao, J., Chen, D., Zhang, Z., Wen, F.: Cocosnet v2: Full-resolution correspondence learning for image translation. In: IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11,465\u201311,475 (2021)","DOI":"10.1109\/CVPR46437.2021.01130"},{"key":"2956_CR38","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: IEEE International Conference on Computer Vision (ICCV), pp. 2223\u20132232 (2017)","DOI":"10.1109\/ICCV.2017.244"},{"issue":"10","key":"2956_CR39","doi-asserted-by":"publisher","first-page":"3096","DOI":"10.1109\/TNNLS.2018.2890018","volume":"30","author":"M Zhu","year":"2019","unstructured":"Zhu, M., Li, J., Wang, N., Gao, X.: A deep collaborative framework for face photo-sketch synthesis. IEEE Trans. Neural Netw. Learn. Syst. 30(10), 3096\u20133108 (2019)","journal-title":"IEEE Trans. Neural Netw. Learn. 
Syst."}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-023-02956-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-023-02956-1\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-023-02956-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,12,15]],"date-time":"2023-12-15T16:16:50Z","timestamp":1702657010000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-023-02956-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,24]]},"references-count":39,"journal-issue":{"issue":"8","published-print":{"date-parts":[[2023,8]]}},"alternative-id":["2956"],"URL":"https:\/\/doi.org\/10.1007\/s00371-023-02956-1","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"type":"print","value":"0178-2789"},{"type":"electronic","value":"1432-2315"}],"subject":[],"published":{"date-parts":[[2023,6,24]]},"assertion":[{"value":"9 June 2023","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 June 2023","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"None of the authors has any potential conflicts of interest to declare related to this research. The portrait images used in this study were obtained from a free public image website, which allows commercial and research use of their images. 
No identifiable information was collected or shared, and the privacy of the individuals represented in these images has been protected.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}