{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,10]],"date-time":"2026-03-10T04:32:50Z","timestamp":1773117170673,"version":"3.50.1"},"reference-count":33,"publisher":"Springer Science and Business Media LLC","issue":"20","license":[{"start":{"date-parts":[[2024,7,22]],"date-time":"2024-07-22T00:00:00Z","timestamp":1721606400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,7,22]],"date-time":"2024-07-22T00:00:00Z","timestamp":1721606400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["61772309"],"award-info":[{"award-number":["61772309"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"the Shandong Province Central Government-Guided Local Science and Technology Development Fund Project","award":["YDZX2023079"],"award-info":[{"award-number":["YDZX2023079"]}]},{"name":"the Jinan City \u2019New Universities 20 Articles\u2019 Scientific Research Leaders Studio","award":["2021GXRC092"],"award-info":[{"award-number":["2021GXRC092"]}]},{"name":"the Shandong Province Higher Education Institutions Youth Innovation Science and Technology Support Plan","award":["2020KJN007"],"award-info":[{"award-number":["2020KJN007"]}]},{"name":"the Shandong Province Key Research and Development Plan","award":["2021SFGC0102"],"award-info":[{"award-number":["2021SFGC0102"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Appl Intell"],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1007\/s10489-024-05692-8","type":"journal-article","created":{"date-parts":[[2024,7,22]],"date-time":"2024-07-22T10:01:58Z","timestamp":1721642518000},"page":"9741-9759","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["A multi-granularity facial extreme makeup transfer and removal model with local-global collaboration"],"prefix":"10.1007","volume":"54","author":[{"given":"Yuyan","family":"Chen","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0307-0608","authenticated-orcid":false,"given":"Jing","family":"Chi","sequence":"additional","affiliation":[]},{"given":"Tianshu","family":"Shen","sequence":"additional","affiliation":[]},{"given":"Bingyi","family":"You","sequence":"additional","affiliation":[]},{"given":"Yanbing","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Caiming","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,7,22]]},"reference":[{"key":"5692_CR1","doi-asserted-by":"crossref","unstructured":"Tong WS, Tang CK, Brown MS et\u00a0al (2007) Example-based cosmetic transfer. In: 15th Pacific conference on computer graphics and applications (PG\u201907). IEEE, pp 211\u2013218","DOI":"10.1109\/PG.2007.31"},{"key":"5692_CR2","unstructured":"Guo D, Sim T (2009) Digital face makeup by example. In: 2009 IEEE conference on computer vision and pattern recognition. IEEE, pp 73\u201379"},{"key":"5692_CR3","doi-asserted-by":"crossref","unstructured":"Li C, Zhou K, Lin S (2015) Simulating makeup through physics-based manipulation of intrinsic image layers. 
In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp 4621\u20134629","DOI":"10.1109\/CVPR.2015.7299093"},{"key":"5692_CR4","doi-asserted-by":"crossref","unstructured":"Li T, Qian R, Dong C et\u00a0al (2018) Beautygan: Instance-level facial makeup transfer with deep generative adversarial network. In: Proceedings of the 26th ACM international conference on multimedia. pp 645\u2013653","DOI":"10.1145\/3240508.3240618"},{"key":"5692_CR5","doi-asserted-by":"crossref","unstructured":"Jiang W, Liu S, Gao C et\u00a0al (2020) Psgan: Pose and expression robust spatial-aware gan for customizable makeup transfer. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp 5194\u20135202","DOI":"10.1109\/CVPR42600.2020.00524"},{"key":"5692_CR6","doi-asserted-by":"crossref","unstructured":"Deng H, Han C, Cai H et\u00a0al (2021) Spatially-invariant style-codes controlled makeup transfer. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp 6549\u20136557","DOI":"10.1109\/CVPR46437.2021.00648"},{"key":"5692_CR7","doi-asserted-by":"crossref","unstructured":"Gu Q, Wang G, Chiu MT et\u00a0al (2019) Ladn: Local adversarial disentangling network for facial makeup and de-makeup. In: Proceedings of the IEEE\/CVF international conference on computer vision. pp 10481\u201310490","DOI":"10.1109\/ICCV.2019.01058"},{"key":"5692_CR8","doi-asserted-by":"crossref","unstructured":"Nguyen T, Tran AT, Hoai M (2021) Lipstick ain\u2019t enough: beyond color matching for in-the-wild makeup transfer. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp 13305\u201313314","DOI":"10.1109\/CVPR46437.2021.01310"},{"issue":"1s","key":"5692_CR9","first-page":"1","volume":"11","author":"L Liu","year":"2014","unstructured":"Liu L, Xing J, Liu S et al (2014) Wow! you are so beautiful today! ACM Trans Multimedia Comput, Commun. Appl (TOMM) 11(1s):1\u201322","journal-title":"Appl (TOMM)"},{"issue":"2","key":"5692_CR10","doi-asserted-by":"publisher","first-page":"485","DOI":"10.1111\/j.1467-8659.2011.01874.x","volume":"30","author":"K Scherbaum","year":"2011","unstructured":"Scherbaum K, Ritschel T, Hullin M et al (2011) Computer-suggested facial makeup. Comput Graph Forum 30(2):485-492","journal-title":"Comput Graph Forum"},{"issue":"1","key":"5692_CR11","doi-asserted-by":"publisher","first-page":"e2199","DOI":"10.1002\/cav.2199","volume":"35","author":"M Gao","year":"2023","unstructured":"Gao M, Wang P (2023) Personalized facial makeup transfer based on outline correspondence. Comput Animation Virtual Worlds 35(1):e2199","journal-title":"Comput Animation Virtual Worlds"},{"issue":"12","key":"5692_CR12","doi-asserted-by":"publisher","first-page":"6521","DOI":"10.1007\/s00371-022-02746-1","volume":"39","author":"H Tiwari","year":"2023","unstructured":"Tiwari H, Subramanian VK, Chen YS (2023) Real-time self-supervised achromatic face colorization. Vis Comput 39(12):6521\u20136536","journal-title":"Vis Comput"},{"key":"5692_CR13","doi-asserted-by":"crossref","unstructured":"Yuan YJ, Han X, He Y, et\u00a0al (2024) Munerf: Robust makeup transfer in neural radiance fields. 
IEEE Trans Vis Comput Graph 1\u201312","DOI":"10.1109\/TVCG.2024.3368443"},{"issue":"4","key":"5692_CR14","doi-asserted-by":"publisher","first-page":"2876","DOI":"10.1109\/TCSVT.2023.3312790","volume":"34","author":"M Li","year":"2023","unstructured":"Li M, Yu W, Liu Q et al (2023) Hybrid transformers with attention-guided spatial embeddings for makeup transfer and removal. IEEE Trans Circuits Syst Video Technol 34(4):2876\u20132890","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"5692_CR15","doi-asserted-by":"publisher","first-page":"108459","DOI":"10.1016\/j.compeleceng.2022.108459","volume":"104","author":"F Zhang","year":"2022","unstructured":"Zhang F, Liang X, Sun Y et al (2022) Pofmakeup: A style transfer method for peking opera makeup. Comput Electr Eng 104:108459","journal-title":"Comput Electr Eng"},{"key":"5692_CR16","doi-asserted-by":"crossref","unstructured":"Yan Q, Guo C, Zhao J et\u00a0al (2023) Beautyrec: Robust, efficient, and component-specific makeup transfer. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp 1102\u20131110","DOI":"10.1109\/CVPRW59228.2023.00117"},{"key":"5692_CR17","doi-asserted-by":"crossref","unstructured":"Sun Z, Chen Y, Xiong S (2022) Ssat: A symmetric semantic-aware transformer network for makeup transfer and removal. In: Proceedings of the AAAI conference on artificial intelligence. pp 2325\u20132334","DOI":"10.1609\/aaai.v36i2.20131"},{"key":"5692_CR18","doi-asserted-by":"crossref","unstructured":"Lu X, Liu F, Rong Y et\u00a0al (2024) Makeupdiffuse: a double image-controlled diffusion model for exquisite makeup transfer. The Vis Comput 1\u201317","DOI":"10.1007\/s00371-024-03317-2"},{"issue":"11","key":"5692_CR19","doi-asserted-by":"publisher","first-page":"139","DOI":"10.1145\/3422622","volume":"63","author":"I Goodfellow","year":"2020","unstructured":"Goodfellow I, Pouget-Abadie J, Mirza M et al (2020) Generative adversarial networks. Commun ACM 63(11):139\u2013144","journal-title":"Commun ACM"},{"key":"5692_CR20","doi-asserted-by":"crossref","unstructured":"Li S, Pu Y, Zhao Z et\u00a0al (2024) Dual-path hypernetworks of style and text for one-shot domain adaptation. Appl Intell","DOI":"10.1007\/s10489-023-05229-5"},{"issue":"4","key":"5692_CR21","first-page":"44009","volume":"83","author":"X Zhu","year":"2024","unstructured":"Zhu X, Cao X, Wang L et al (2024) Dccmf-gan: double cycle consistently constrained multi-feature discrimination gan for makeup transfer. Multimedia Tools Appl 83(4):44009\u201344022","journal-title":"Multimedia Tools Appl"},{"key":"5692_CR22","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2022.104400","volume":"120","author":"QL Yuan","year":"2022","unstructured":"Yuan QL, Zhang HL (2022) Ramt-gan: Realistic and accurate makeup transfer with generative adversarial network. Image Vis Comput 120:104400","journal-title":"Image Vis Comput"},{"issue":"3","key":"5692_CR23","doi-asserted-by":"publisher","first-page":"e2235","DOI":"10.1002\/cav.2235","volume":"35","author":"W Xu","year":"2024","unstructured":"Xu W, Wang P, Yang X (2024) Frsegan: Free-style editable facial makeup transfer based on gan combined with transformer. 
Comput Animat Virtual Worlds 35(3):e2235","journal-title":"Comput Animat Virtual Worlds"},{"key":"5692_CR24","doi-asserted-by":"publisher","first-page":"2166","DOI":"10.1007\/s11263-019-01267-0","volume":"128","author":"Y Li","year":"2020","unstructured":"Li Y, Huang H, Cao J et al (2020) Disentangled representation learning of makeup portraits in the wild. Int J Comput Vision 128:2166\u20132184","journal-title":"Int J Comput Vision"},{"issue":"11","key":"5692_CR25","doi-asserted-by":"publisher","first-page":"8538","DOI":"10.1109\/TPAMI.2021.3083484","volume":"44","author":"S Liu","year":"2021","unstructured":"Liu S, Jiang W, Gao C et al (2021) Psgan++: robust detail-preserving makeup transfer and removal. IEEE Trans Pattern Anal Mach Intell 44(11):8538\u20138551","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"issue":"9","key":"5692_CR26","doi-asserted-by":"publisher","first-page":"e7994","DOI":"10.1002\/cpe.7994","volume":"36","author":"Y Chen","year":"2024","unstructured":"Chen Y, Xie J, Xue J et al (2024) A robust transformer gan for unpaired data makeup transfer. Concurr Computat: Pract Experience 36(9):e7994","journal-title":"Concurr Computat: Pract Experience"},{"key":"5692_CR27","doi-asserted-by":"crossref","unstructured":"Yang C, He W, Xu Y et\u00a0al (2022) Elegant: Exquisite and locally editable gan for makeup transfer. In: European conference on computer vision. Springer, pp 737\u2013754","DOI":"10.1007\/978-3-031-19787-1_42"},{"key":"5692_CR28","doi-asserted-by":"crossref","unstructured":"Hao M, Gu G, Fu H et al (2022) Cumtgan: An instance-level controllable u-net gan for facial makeup transfer. Knowl-Based Syst 249:108996","DOI":"10.1016\/j.knosys.2022.108996"},{"key":"5692_CR29","doi-asserted-by":"publisher","first-page":"103464","DOI":"10.1016\/j.jvcir.2022.103464","volume":"85","author":"S Fang","year":"2022","unstructured":"Fang S, Duan M, Li K et al (2022) Facial makeup transfer with gan for different aging faces. J Vis Commun Image Represent 85:103464","journal-title":"J Vis Commun Image Represent"},{"key":"5692_CR30","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2022.109958","volume":"257","author":"Z Xu","year":"2022","unstructured":"Xu Z, Wu S, Jiao Q et al (2022) Tsev-gan: Generative adversarial networks with target-aware style encoding and verification for facial makeup transfer. Knowl-Based Syst 257:109958","journal-title":"Knowl-Based Syst"},{"issue":"17","key":"5692_CR31","doi-asserted-by":"publisher","first-page":"20441","DOI":"10.1007\/s10489-023-04576-7","volume":"53","author":"H Chen","year":"2023","unstructured":"Chen H, Li W, Gao X et al (2023) Aep-gan: Aesthetic enhanced perception generative adversarial network for asian facial beauty synthesis. Appl Intell 53(17):20441\u201320468","journal-title":"Appl Intell"},{"key":"5692_CR32","doi-asserted-by":"crossref","unstructured":"Deng J, Guo J, Xue N et\u00a0al (2019) Arcface: Additive angular margin loss for deep face recognition. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp 4690\u20134699","DOI":"10.1109\/CVPR.2019.00482"},{"key":"5692_CR33","unstructured":"Heusel M, Ramsauer H, Unterthiner T et\u00a0al (2017) Gans trained by a two time-scale update rule converge to a local nash equilibrium. 
Adv Neural Inf Process Syst 30"}],"container-title":["Applied Intelligence"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-024-05692-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10489-024-05692-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10489-024-05692-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,15]],"date-time":"2024-08-15T13:23:27Z","timestamp":1723728207000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10489-024-05692-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7,22]]},"references-count":33,"journal-issue":{"issue":"20","published-print":{"date-parts":[[2024,10]]}},"alternative-id":["5692"],"URL":"https:\/\/doi.org\/10.1007\/s10489-024-05692-8","relation":{},"ISSN":["0924-669X","1573-7497"],"issn-type":[{"value":"0924-669X","type":"print"},{"value":"1573-7497","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,7,22]]},"assertion":[{"value":"14 July 2024","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 July 2024","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"Not applicable","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval"}},{"value":"Not applicable","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to participate"}},{"value":"Not applicable","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"All the authors declare that there is no confict of interest.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}
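The record above is the envelope the public Crossref REST API returns for a single work: "status", "message-type", and "message-version" wrap the actual metadata in "message". Below is a minimal Python sketch, using only the standard library, of how such a record can be fetched by DOI and its key fields read; it assumes network access, and the mailto address in the User-Agent is a placeholder (Crossref asks clients to identify themselves this way to use its "polite" pool).

import json
import urllib.request

# DOI taken from the record above.
DOI = "10.1007/s10489-024-05692-8"
URL = f"https://api.crossref.org/works/{DOI}"

# Placeholder contact address; replace with your own per Crossref etiquette.
req = urllib.request.Request(
    URL, headers={"User-Agent": "demo-script/0.1 (mailto:you@example.org)"}
)
with urllib.request.urlopen(req, timeout=30) as resp:
    envelope = json.load(resp)

# The envelope mirrors the record above: status / message-type / message.
assert envelope["status"] == "ok" and envelope["message-type"] == "work"
work = envelope["message"]

# Bibliographic fields such as "title" and "container-title" are lists
# even when single-valued.
print(work["title"][0])
print(work["container-title"][0], "| DOI:", work["DOI"])

# Authors are given/family pairs, listed in sequence order.
print(", ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))

# References carry a DOI when Crossref matched them; otherwise only the
# "unstructured" citation string is present (cf. 5692_CR2 and 5692_CR33 above).
for ref in work.get("reference", []):
    print(ref["key"], "->", ref.get("DOI") or ref.get("unstructured", "")[:60])

Keeping to the standard library makes the sketch dependency-free; a production client would typically add retries and rate limiting on top of this.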