{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,14]],"date-time":"2025-06-14T04:44:12Z","timestamp":1749876252295,"version":"3.40.3"},"publisher-location":"Cham","reference-count":45,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031189159"},{"type":"electronic","value":"9783031189166"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-18916-6_12","type":"book-chapter","created":{"date-parts":[[2022,10,26]],"date-time":"2022-10-26T23:03:53Z","timestamp":1666825433000},"page":"137-151","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["LAGAN: Landmark Aided Text to\u00a0Face Sketch Generation"],"prefix":"10.1007","author":[{"given":"Wentao","family":"Chao","sequence":"first","affiliation":[]},{"given":"Liang","family":"Chang","sequence":"additional","affiliation":[]},{"given":"Fangfang","family":"Xi","sequence":"additional","affiliation":[]},{"given":"Fuqing","family":"Duan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,10,27]]},"reference":[{"key":"12_CR1","unstructured":"Chang, A.X., et al.: ShapeNet: an information-rich 3D model repository. arXiv preprint arXiv:1512.03012 (2015)"},{"key":"12_CR2","doi-asserted-by":"crossref","unstructured":"Chang, L., Zhou, M., Han, Y., Deng, X.: Face sketch synthesis via sparse representation. In: ICPR, pp. 
2146\u20132149 (2010)","DOI":"10.1109\/ICPR.2010.526"},{"key":"12_CR3","doi-asserted-by":"crossref","unstructured":"Choi, Y., Choi, M., Kim, M., Ha, J.W., Kim, S., Choo, J.: StarGAN: unified generative adversarial networks for multi-domain image-to-image translation. In: CVPR, pp. 8789\u20138797 (2018)","DOI":"10.1109\/CVPR.2018.00916"},{"key":"12_CR4","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: ImageNet: a large-scale hierarchical image database. In: CVPR, pp. 248\u2013255 (2009)","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"12_CR5","unstructured":"Di, X., Patel, V.M.: Face synthesis from visual attributes via sketch using conditional vaes and gans. arXiv preprint arXiv:1801.00077 (2017)"},{"key":"12_CR6","unstructured":"Goodfellow, I.J., et al.: Generative adversarial nets. In: NeurIPS, pp. 2672\u20132680 (2014)"},{"key":"12_CR7","unstructured":"Gorti, S.K., Ma, J.: Text-to-image-to-text translation using cycle consistent adversarial networks. arXiv preprint arXiv:1808.04538 (2018)"},{"issue":"11","key":"12_CR8","doi-asserted-by":"publisher","first-page":"2597","DOI":"10.1109\/TPAMI.2017.2738004","volume":"40","author":"H Han","year":"2017","unstructured":"Han, H., Jain, A.K., Wang, F., Shan, S., Chen, X.: Heterogeneous face attribute estimation: a deep multi-task learning approach. TPAMI 40(11), 2597\u20132609 (2017)","journal-title":"TPAMI"},{"key":"12_CR9","unstructured":"Kingma, D.P., Welling, M.: Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114 (2013)"},{"key":"12_CR10","doi-asserted-by":"crossref","unstructured":"Klare, B.F., Klum, S., Klontz, J.C., Taborsky, E., Akgul, T., Jain, A.K.: Suspect identification based on descriptive facial attributes. In: IJCB, pp. 
1\u20138 (2014)","DOI":"10.1109\/BTAS.2014.6996255"},{"key":"12_CR11","unstructured":"Kurach, K., Lucic, M., Zhai, X., Michalski, M., Gelly, S.: The GAN landscape: losses, architectures, regularization, and normalization (2018). CoRR abs\/1807.04720"},{"key":"12_CR12","unstructured":"Li, B., Qi, X., Lukasiewicz, T., Torr, P.H.: Controllable text-to-image generation. In: NeurIPS, pp. 2065\u20132075 (2019)"},{"key":"12_CR13","doi-asserted-by":"crossref","unstructured":"Liao, W., Hu, K., Yang, M.Y., Rosenhahn, B.: Text to image generation with semantic-spatial aware GAN. arXiv preprint arXiv:2104.00567 (2021)","DOI":"10.1109\/CVPR52688.2022.01765"},{"key":"12_CR14","unstructured":"Lucic, M., Kurach, K., Michalski, M., Gelly, S., Bousquet, O.: Are gans created equal? a large-scale study. arXiv preprint arXiv:1711.10337 (2017)"},{"key":"12_CR15","unstructured":"Mirza, M., Osindero, S.: Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784 (2014)"},{"key":"12_CR16","unstructured":"Miyato, T., Kataoka, T., Koyama, M., Yoshida, Y.: Spectral normalization for generative adversarial networks. arXiv preprint arXiv:1802.05957 (2018)"},{"key":"12_CR17","unstructured":"Reed, S., Akata, Z., Yan, X., Logeswaran, L., Schiele, B., Lee, H.: Generative adversarial text to image synthesis. In: ICML, pp. 1060\u20131069 (2016)"},{"key":"12_CR18","unstructured":"Reed, S.E., Akata, Z., Mohan, S., Tenka, S., Schiele, B., Lee, H.: Learning what and where to draw. In: NeurIPS, pp. 217\u2013225 (2016)"},{"key":"12_CR19","unstructured":"Salimans, T., Goodfellow, I., Zaremba, W., Cheung, V., Radford, A., Chen, X.: Improved techniques for training GANs. In: NeurIPS, pp. 2234\u20132242 (2016)"},{"key":"12_CR20","doi-asserted-by":"crossref","unstructured":"Sangkloy, P., Lu, J., Fang, C., Yu, F., Hays, J.: Scribbler: controlling deep image synthesis with sketch and color. In: CVPR, pp. 
6836\u20136845 (2017)","DOI":"10.1109\/CVPR.2017.723"},{"key":"12_CR21","doi-asserted-by":"crossref","unstructured":"Shen, W., Liu, R.: Learning residual images for face attribute manipulation. In: CVPR, pp. 1225\u20131233 (2017)","DOI":"10.1109\/CVPR.2017.135"},{"key":"12_CR22","doi-asserted-by":"crossref","unstructured":"Song, L., Lu, Z., He, R., Sun, Z., Tan, T.: Geometry guided adversarial facial expression synthesis. arXiv preprint arXiv:1712.03474 (2017)","DOI":"10.1145\/3240508.3240612"},{"key":"12_CR23","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"800","DOI":"10.1007\/978-3-319-10599-4_51","volume-title":"Computer Vision \u2013 ECCV 2014","author":"Y Song","year":"2014","unstructured":"Song, Y., Bao, L., Yang, Q., Yang, M.-H.: Real-time exemplar-based face sketch synthesis. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8694, pp. 800\u2013813. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10599-4_51"},{"key":"12_CR24","unstructured":"Sun, R., Huang, C., Shi, J., Ma, L.: Mask-aware photorealistic face attribute manipulation. arXiv preprint arXiv:1804.08882 (2018)"},{"key":"12_CR25","doi-asserted-by":"crossref","unstructured":"Szegedy, C., et al.: Going deeper with convolutions. In: CVPR, pp. 1\u20139 (2015)","DOI":"10.1109\/CVPR.2015.7298594"},{"issue":"8","key":"12_CR26","doi-asserted-by":"publisher","first-page":"3851","DOI":"10.1166\/jctn.2017.6684","volume":"14","author":"Y Tan","year":"2017","unstructured":"Tan, Y., Tang, L., Wang, X.: An improved criminisi inpainting algorithm based on sketch image. J. Comput. Theor. Nanosci. 14(8), 3851\u20133860 (2017)","journal-title":"J. Comput. Theor. Nanosci."},{"issue":"1","key":"12_CR27","first-page":"50","volume":"14","author":"X Tang","year":"2004","unstructured":"Tang, X., Wang, X.: Face sketch recognition. 
TCSVT 14(1), 50\u201357 (2004)","journal-title":"TCSVT"},{"key":"12_CR28","unstructured":"Tao, M., et al.: DF-GAN: a simple and effective baseline for text-to-image synthesis. arXiv preprint arXiv:2008.05865 (2020)"},{"key":"12_CR29","doi-asserted-by":"publisher","first-page":"271","DOI":"10.1016\/j.forsciint.2015.09.002","volume":"257","author":"P Tome","year":"2015","unstructured":"Tome, P., Vera-Rodriguez, R., Fierrez, J., Ortega-Garcia, J.: Facial soft biometric features for forensic face recognition. Forensic Sci. Int. 257, 271\u2013284 (2015)","journal-title":"Forensic Sci. Int."},{"issue":"3","key":"12_CR30","first-page":"1264","volume":"26","author":"N Wang","year":"2017","unstructured":"Wang, N., Gao, X., Sun, L., Li, J.: Bayesian face sketch synthesis. TIP 26(3), 1264\u20131274 (2017)","journal-title":"TIP"},{"key":"12_CR31","unstructured":"Wang, N., Li, J., Sun, L., Song, B., Gao, X.: Training-free synthesized face sketch recognition using image quality assessment metrics. arXiv preprint arXiv:1603.07823 (2016)"},{"issue":"1","key":"12_CR32","doi-asserted-by":"publisher","first-page":"9","DOI":"10.1007\/s11263-013-0645-9","volume":"106","author":"N Wang","year":"2014","unstructured":"Wang, N., Tao, D., Gao, X., Li, X., Li, J.: A comprehensive survey to face hallucination. IJCV 106(1), 9\u201330 (2014)","journal-title":"IJCV"},{"issue":"11","key":"12_CR33","doi-asserted-by":"publisher","first-page":"1955","DOI":"10.1109\/TPAMI.2008.222","volume":"31","author":"X Wang","year":"2008","unstructured":"Wang, X., Tang, X.: Face photo-sketch synthesis and recognition. TPAMI 31(11), 1955\u20131967 (2008)","journal-title":"TPAMI"},{"issue":"4","key":"12_CR34","first-page":"600","volume":"13","author":"Z Wang","year":"2004","unstructured":"Wang, Z., Bovik, A.C., Sheikh, H.R., Simoncelli, E.P.: Image quality assessment: from error visibility to structural similarity. 
TIP 13(4), 600\u2013612 (2004)","journal-title":"TIP"},{"key":"12_CR35","doi-asserted-by":"crossref","unstructured":"Xiong, X., Torre, F.D.L.: Supervised descent method and its applications to face alignment. In: CVPR, pp. 532\u2013539 (2013)","DOI":"10.1109\/CVPR.2013.75"},{"key":"12_CR36","doi-asserted-by":"crossref","unstructured":"Xu, T., et al.: Attngan: fine-grained text to image generation with attentional generative adversarial networks. In: CVPR, pp. 1316\u20131324 (2018)","DOI":"10.1109\/CVPR.2018.00143"},{"key":"12_CR37","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"776","DOI":"10.1007\/978-3-319-46493-0_47","volume-title":"Computer Vision \u2013 ECCV 2016","author":"X Yan","year":"2016","unstructured":"Yan, X., Yang, J., Sohn, K., Lee, H.: Attribute2Image: conditional image generation from visual attributes. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9908, pp. 776\u2013791. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46493-0_47"},{"key":"12_CR38","doi-asserted-by":"crossref","unstructured":"Yi, Z., Zhang, H., Tan, P., Gong, M.: Dualgan: unsupervised dual learning for image-to-image translation. In: ICCV, pp. 2868\u20132876 (2017)","DOI":"10.1109\/ICCV.2017.310"},{"key":"12_CR39","doi-asserted-by":"crossref","unstructured":"Yuan, M., Peng, Y.: Text-to-image synthesis via symmetrical distillation networks. arXiv preprint arXiv:1808.06801 (2018)","DOI":"10.1145\/3240508.3240559"},{"key":"12_CR40","unstructured":"Zhang, H., Goodfellow, I., Metaxas, D., Odena, A.: Self-attention generative adversarial networks. In: ICML, pp. 7354\u20137363. PMLR (2019)"},{"key":"12_CR41","doi-asserted-by":"crossref","unstructured":"Zhang, H., et al.: StackGAN++: realistic image synthesis with stacked generative adversarial networks. 
arXiv preprint arXiv:1710.10916 (2017)","DOI":"10.1109\/TPAMI.2018.2856256"},{"key":"12_CR42","doi-asserted-by":"crossref","unstructured":"Zhang, H., et al.: StackGAN: text to photo-realistic image synthesis with stacked generative adversarial networks. In: ICCV, pp. 5907\u20135915 (2017)","DOI":"10.1109\/ICCV.2017.629"},{"key":"12_CR43","doi-asserted-by":"crossref","unstructured":"Zhang, M., Wang, N., Li, Y., Wang, R., Gao, X.: Face sketch synthesis from coarse to fine. In: AAAI, pp. 7558\u20137565 (2018)","DOI":"10.1609\/aaai.v32i1.12224"},{"key":"12_CR44","doi-asserted-by":"crossref","unstructured":"Zhang, W., Wang, X., Tang, X.: Coupled information-theoretic encoding for face photo-sketch recognition. In: CVPR, pp. 513\u2013520. IEEE (2011)","DOI":"10.1109\/CVPR.2011.5995324"},{"key":"12_CR45","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"438","DOI":"10.1007\/978-3-030-01267-0_26","volume-title":"Computer Vision \u2013 ECCV 2018","author":"C Zou","year":"2018","unstructured":"Zou, C., et al.: SketchyScene: richly-annotated scene sketches. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) ECCV 2018. LNCS, vol. 11219, pp. 438\u2013454. Springer, Cham (2018). 
https:\/\/doi.org\/10.1007\/978-3-030-01267-0_26"}],"container-title":["Lecture Notes in Computer Science","Pattern Recognition and Computer Vision"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-18916-6_12","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,10,26]],"date-time":"2022-10-26T23:42:37Z","timestamp":1666827757000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-18916-6_12"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031189159","9783031189166"],"references-count":45,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-18916-6_12","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"27 October 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"PRCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Chinese Conference on Pattern Recognition and Computer Vision  (PRCV)","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Shenzhen","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"14 October 
2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"ccprcv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/en.prcv.cn\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"microsoft","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"564","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"233","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"41% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of 
Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.03","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.35","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}