{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T18:57:16Z","timestamp":1757617036010,"version":"3.44.0"},"reference-count":49,"publisher":"Springer Science and Business Media LLC","issue":"23","license":[{"start":{"date-parts":[[2024,9,11]],"date-time":"2024-09-11T00:00:00Z","timestamp":1726012800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,11]],"date-time":"2024-09-11T00:00:00Z","timestamp":1726012800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"DOI":"10.1007\/s11042-024-20101-5","type":"journal-article","created":{"date-parts":[[2024,9,11]],"date-time":"2024-09-11T01:02:47Z","timestamp":1726016567000},"page":"26481-26500","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Enhancing eyeglasses removal in facial images: a novel approach using translation models for eyeglasses mask completion"],"prefix":"10.1007","volume":"84","author":[{"given":"Zahra","family":"Esmaily","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9935-7821","authenticated-orcid":false,"given":"Hossein","family":"Ebrahimpour-Komleh","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,11]]},"reference":[{"issue":"9","key":"20101_CR1","doi-asserted-by":"publisher","first-page":"4373","DOI":"10.1109\/TCYB.2020.2995496","volume":"51","author":"B Hu","year":"2020","unstructured":"Hu B, Zheng Z, Liu P, Yang W, Ren M (2020) Unsupervised eyeglasses removal in the wild. 
IEEE Transact Cybern 51(9):4373\u20134385","journal-title":"IEEE Transact Cybern"},{"key":"20101_CR2","doi-asserted-by":"crossref","unstructured":"Guo J, Zhu X, Zhao C, Cao D, Lei Z, Li SZ (2020) Learning meta face recognition in unseen domains. Proc IEEE\/CVF Conf Comput Vis Patt Recognit. pp. 6163\u20136172","DOI":"10.1109\/CVPR42600.2020.00620"},{"key":"20101_CR3","doi-asserted-by":"crossref","unstructured":"Cao D, Zhu X, Huang X, Guo J, Lei Z (2020) Domain balancing: Face recognition on long-tailed domains. Proc IEEE\/CVF Conf Comput Vis Patt Recognit. pp. 5671\u20135679","DOI":"10.1109\/CVPR42600.2020.00571"},{"issue":"8","key":"20101_CR4","doi-asserted-by":"publisher","first-page":"3191","DOI":"10.1109\/TCYB.2018.2846579","volume":"49","author":"J Gaston","year":"2018","unstructured":"Gaston J, Ming J, Crookes D (2018) Matching larger image areas for unconstrained face identification. IEEE Transact Cybernet 49(8):3191\u20133202","journal-title":"IEEE Transact Cybernet"},{"key":"20101_CR5","doi-asserted-by":"crossref","unstructured":"Sun Y, Xu Q, Li Y, Zhang C, Li Y, Wang S et al (2019) Perceive where to focus: Learning visibility-aware part-level features for partial person re-identification. Proc IEEE\/CVF Conf Comput Vis Patt Recognit. pp. 393\u2013402","DOI":"10.1109\/CVPR.2019.00048"},{"issue":"10","key":"20101_CR6","doi-asserted-by":"publisher","first-page":"4393","DOI":"10.1109\/TCYB.2019.2903205","volume":"50","author":"Y Wang","year":"2019","unstructured":"Wang Y, Tang YY, Li L, Chen H (2019) Modal regression-based atomic representation for robust face recognition and reconstruction. IEEE transactions on cybernetics 50(10):4393\u20134405","journal-title":"IEEE transactions on cybernetics"},{"key":"20101_CR7","doi-asserted-by":"crossref","unstructured":"Lee YH, Lai SH (2020) Byeglassesgan: Identity preserving eyeglasses removal for face images. Comput Vis\u2013ECCV 2020: 16th Eur Conf Glasgow. Springer International Publishing. pp. 
243\u2013258","DOI":"10.1007\/978-3-030-58526-6_15"},{"key":"20101_CR8","doi-asserted-by":"crossref","unstructured":"Yang H, Ciftci U, Yin L (2018) Facial expression recognition by de-expression residue learning. Proc IEEE Conf Comput Vis Patt Recognit. pp. 2168\u20132177","DOI":"10.1109\/CVPR.2018.00231"},{"issue":"1","key":"20101_CR9","doi-asserted-by":"publisher","first-page":"73","DOI":"10.1007\/s00530-022-00984-w","volume":"29","author":"RRB DyapadyAnnappa","year":"2023","unstructured":"DyapadyAnnappa RRB (2023) A comprehensive review of facial expression recognition techniques. Multimedia Syst 29(1):73\u2013103","journal-title":"Multimedia Syst"},{"key":"20101_CR10","doi-asserted-by":"crossref","unstructured":"Rangesh A, Zhang B, Trivedi MM (2020) Driver gaze estimation in the real world: Overcoming the eyeglass challenge. 2020 IEEE Intell Veh Symp (IV). pp. 1054\u20131059","DOI":"10.1109\/IV47402.2020.9304573"},{"key":"20101_CR11","doi-asserted-by":"crossref","unstructured":"Lyu J, Wang Z, Xu F (2022) Portrait eyeglasses and shadow removal by leveraging 3d synthetic data. Proc IEEE\/CVF Conf Comput Vis Patt Recognit. pp. 3429\u20133439","DOI":"10.1109\/CVPR52688.2022.00342"},{"key":"20101_CR12","doi-asserted-by":"crossref","unstructured":"Guo J, Zhu X, Lei Z, Li SZ (2018) Face synthesis for eyeglass-robust face recognition. Biom Recognit: 13th Chinese Conf CCBR 2018, Urumqi, China. Springer International Publishing. pp. 275\u2013284","DOI":"10.1007\/978-3-319-97909-0_30"},{"key":"20101_CR13","doi-asserted-by":"crossref","unstructured":"Liu Z, Luo P, Wang X, Tang X (2015) Deep learning face attributes in the wild. Proc IEEE Int Conf Comput Vis. pp. 3730\u20133738","DOI":"10.1109\/ICCV.2015.425"},{"key":"20101_CR14","unstructured":"Goodfellow I, Pouget-Abadie J, Mirza M, Xu B, Warde-Farley D, Ozair S et al (2014) Generative adversarial nets. 
Adv Neur Inf Proc Syst.\u00a027"},{"key":"20101_CR15","doi-asserted-by":"crossref","unstructured":"Isola P, Zhu JY, Zhou T, Efros AA (2017) Image-to-image translation with conditional adversarial networks. Proc IEEE Conf Comput Vis Patt Recognit. pp. 1125\u20131134","DOI":"10.1109\/CVPR.2017.632"},{"key":"20101_CR16","doi-asserted-by":"crossref","unstructured":"Choi Y, Choi M, Kim M, Ha JW, Kim S, Choo J (2018) Stargan: Unified generative adversarial networks for multi-domain image-to-image translation. Proc IEEE Conf Comput Vis Patt Recognit. pp. 8789\u20138797","DOI":"10.1109\/CVPR.2018.00916"},{"key":"20101_CR17","doi-asserted-by":"crossref","unstructured":"Chu W, Tai Y, Wang C, Li J, Huang F, Ji R (2020) Sscgan: Facial attribute editing via style skip connections. Comput Vis\u2013ECCV 2020: 16th Eur Conf Glasgow, UK. Springer International Publishing. pp. 414\u2013429","DOI":"10.1007\/978-3-030-58555-6_25"},{"key":"20101_CR18","doi-asserted-by":"crossref","unstructured":"Liu M, Ding Y, Xia M, Liu X, Ding E, Zuo W et al (2019) Stgan: A unified selective transfer network for arbitrary image attribute editing. Proc IEEE\/CVF Conf Comput Vis Patt Recognit. pp. 3673\u20133682","DOI":"10.1109\/CVPR.2019.00379"},{"key":"20101_CR19","doi-asserted-by":"crossref","unstructured":"Wu PW, Lin YJ, Chang CH, Chang EY, Liao SW (2019) Relgan: Multi-domain image-to-image translation via relative attributes. Proc IEEE\/CVF Int Conf Comput Vis. pp. 5914\u20135922","DOI":"10.1109\/ICCV.2019.00601"},{"key":"20101_CR20","doi-asserted-by":"crossref","unstructured":"Gao Y, Wei F, Bao J, Gu S, Chen D, Wen F et al (2021) High-fidelity and arbitrary face editing. Proc IEEE\/CVF Conf Comput Vis Patt Recognit. pp. 16115\u201316124","DOI":"10.1109\/CVPR46437.2021.01585"},{"key":"20101_CR21","doi-asserted-by":"crossref","unstructured":"Guo J, Zhu X, Lei Z, Li SZ (2018) Face synthesis for eyeglass-robust face recognition. Chinese Conf Biomet Recognit. Springer International Publishing. pp. 
275\u2013284","DOI":"10.1007\/978-3-319-97909-0_30"},{"key":"20101_CR22","doi-asserted-by":"crossref","unstructured":"Cheng M, Cao X (2021) ERGAN: High Perform GAN for Eyeglasses Removal. 16th Int Conf Int Syst Knowl Eng (ISKE). IEEE. pp. 406\u2013411","DOI":"10.1109\/ISKE54062.2021.9755402"},{"issue":"2","key":"20101_CR23","doi-asserted-by":"publisher","first-page":"163","DOI":"10.1016\/j.inffus.2011.09.002","volume":"14","author":"WK Wong","year":"2013","unstructured":"Wong WK, Zhao H (2013) Eyeglasses removal of thermal image based on visible information. Inf Fus 14(2):163\u2013176","journal-title":"Inf Fus"},{"key":"20101_CR24","first-page":"101","volume-title":"Eyeglasses removal from facial image based on mvlr","author":"JS Jin","year":"2013","unstructured":"Jin JS, Xu C, Xu M, Zhang Z, Peng Y (2013) Eyeglasses removal from facial image based on mvlr. The Era of Interactive Media. Springer, New York, pp 101\u2013109"},{"key":"20101_CR25","doi-asserted-by":"crossref","unstructured":"Rangesh A, Zhang B, Trivedi MM (2020) Driver gaze estimation in the real world: Overcoming the eyeglass challenge.\"\u00a02020 IEEE Int Veh Symp (IV). pp. 1054\u20131059","DOI":"10.1109\/IV47402.2020.9304573"},{"key":"20101_CR26","doi-asserted-by":"crossref","unstructured":"Kang S, Hahn T (2021) Eyeglass Remover Network based on a Synthetic Image Dataset. KSII Transact Int Inf Syst. 15(4)","DOI":"10.3837\/tiis.2021.04.016"},{"key":"20101_CR27","doi-asserted-by":"crossref","unstructured":"Liang M, Xue Y, Xue K, Yang A (2017) Deep convolution neural networks for automatic eyeglasses removal. DEStech Transact Comput Sci Eng","DOI":"10.12783\/dtcse\/aiea2017\/14988"},{"key":"20101_CR28","doi-asserted-by":"publisher","first-page":"2691","DOI":"10.1007\/s11042-020-09715-7","volume":"80","author":"M Zhao","year":"2021","unstructured":"Zhao M, Zhang Z, Zhang X, Zhang L, Li B (2021) Eyeglasses removal based on attributes detection and improved TV restoration model. 
Multimed Tools Appl 80:2691\u20132712","journal-title":"Multimed Tools Appl"},{"key":"20101_CR29","doi-asserted-by":"crossref","unstructured":"Liu Y, Li Q, Deng Q, Sun Z, Yang MH (2023) Gan-based facial attribute manipulation. IEEE Transact Patt Anal Mach Intell","DOI":"10.1109\/TPAMI.2023.3298868"},{"key":"20101_CR30","doi-asserted-by":"crossref","unstructured":"Zhang G, Kan M, Shan S, Chen X (2018) Generative adversarial network with spatial attention for face attribute editing. Proc Eur Conf Comput Vis (ECCV). pp. 417\u2013432","DOI":"10.1007\/978-3-030-01231-1_26"},{"key":"20101_CR31","doi-asserted-by":"crossref","unstructured":"Laishram L, Shaheryar M, Lee JT, Jung SK (2023) High-Quality Face Caricature via Style Translation. IEEE Access","DOI":"10.1109\/ACCESS.2023.3340788"},{"key":"20101_CR32","doi-asserted-by":"crossref","unstructured":"Jo Y, Park J (2019) Sc-fegan: Face editing generative adversarial network with user's sketch and color. Proc IEEE\/CVF Int Conf Comput Vis. pp. 1745\u20131753","DOI":"10.1109\/ICCV.2019.00183"},{"key":"20101_CR33","doi-asserted-by":"crossref","unstructured":"Liu G, Reda FA, Shih KJ, Wang TC, Tao A, Catanzaro B (2018) Image inpainting for irregular holes using partial convolutions. Proc Eur Conf Comput Vis (ECCV). pp. 85\u2013100","DOI":"10.1007\/978-3-030-01252-6_6"},{"key":"20101_CR34","doi-asserted-by":"crossref","unstructured":"Yu J, Lin Z, Yang J, Shen X, Lu X, Huang TS (2019) Free-form image inpainting with gated convolution. Proc IEEE\/CVF Int Conf Comput Vis. pp. 4471\u20134480","DOI":"10.1109\/ICCV.2019.00457"},{"key":"20101_CR35","doi-asserted-by":"crossref","unstructured":"Yu J, Lin Z, Yang J, Shen X, Lu X, Huang TS (2018) Generative image inpainting with contextual attention. Proc IEEE Conf Comput Vis Patt Recognit. pp. 
5505\u20135514","DOI":"10.1109\/CVPR.2018.00577"},{"key":"20101_CR36","doi-asserted-by":"crossref","unstructured":"Tan C, Sun F, Kong T, Zhang W, Yang C, Liu C (2018) A survey on deep transfer learning. Artif Neur Netw Mach Learn\u2013ICANN 2018: 27th Int Conf Artif Neur Netw Rhodes, Greece. Springer International Publishing. pp. 270\u2013279","DOI":"10.1007\/978-3-030-01424-7_27"},{"issue":"1","key":"20101_CR37","doi-asserted-by":"publisher","first-page":"013001","DOI":"10.1117\/1.JEI.32.1.013001","volume":"32","author":"Z Esmaeily","year":"2023","unstructured":"Esmaeily Z, Rezaeian M (2023) Building roof wireframe extraction from aerial images using a three-stream deep neural network. J Electron Imaging 32(1):013001\u2013013001","journal-title":"J Electron Imaging"},{"issue":"10","key":"20101_CR38","doi-asserted-by":"publisher","first-page":"2148","DOI":"10.3390\/sym14102148","volume":"14","author":"C Shao","year":"2022","unstructured":"Shao C, Li X, Li F, Zhou Y (2022) Large Mask Image Completion with Conditional GAN. Symmetry 14(10):2148","journal-title":"Symmetry"},{"key":"20101_CR39","doi-asserted-by":"crossref","unstructured":"Sreedhar K, Panlal B (2012) Enhancement of images using morphological transformation. arXiv preprint","DOI":"10.5121\/ijcsit.2012.4103"},{"key":"20101_CR40","doi-asserted-by":"crossref","unstructured":"Zhu JY, Park T, Isola P, Efros AA (2017) Unpaired image-to-image translation using cycle-consistent adversarial networks. Proc IEEE Int Conf Comput Vis. pp. 2223\u20132232","DOI":"10.1109\/ICCV.2017.244"},{"key":"20101_CR41","volume-title":"Debiasing Image Generative Models","author":"MM Tanjim","year":"2023","unstructured":"Tanjim MM (2023) Debiasing Image Generative Models. University of California, San Diego"},{"key":"20101_CR42","doi-asserted-by":"crossref","unstructured":"Ronneberger O, Fischer P, Brox T (2015) U-net: Convolutional networks for biomedical image segmentation. 
Med Image Comput Comput-Ass Intervent\u2013MICCAI 2015: 18th Int Conf Munich, Germany. Springer International Publishing. pp. 234\u2013241","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"20101_CR43","doi-asserted-by":"publisher","first-page":"82031","DOI":"10.1109\/ACCESS.2021.3086020","volume":"9","author":"N Siddique","year":"2021","unstructured":"Siddique N, Paheding S, Elkin CP, Devabhaktuni V (2021) U-net and its variants for medical image segmentation: A review of theory and applications. IEEE Access 9:82031\u201382057","journal-title":"IEEE Access"},{"key":"20101_CR44","unstructured":"Henry J, Natalie T, Madsen D (2021) Pix2Pix GAN for Image-to-Image Translation. Res Gate Publication. pp. 1\u20135"},{"key":"20101_CR45","unstructured":"Heusel M, Ramsauer H, Unterthiner T, Nessler B, Hochreiter S (2017) Gans trained by a two time-scale update rule converge to a local nash equilibrium. Adv Neur Inf Proc Syst. 30"},{"key":"20101_CR46","unstructured":"Bi\u0144kowski M, Sutherland DJ, Arbel M, Gretton A (2018) Demystifying mmd gans. arXiv preprint"},{"key":"20101_CR47","doi-asserted-by":"crossref","unstructured":"Li X, Zhang S, Hu J, Cao L, Hong X, Mao X et al (2021) Image-to-image translation via hierarchical style disentanglement. Proc IEEE\/CVF Conf Comput Vis Patt Recognit. pp. 8639\u20138648","DOI":"10.1109\/CVPR46437.2021.00853"},{"key":"20101_CR48","unstructured":"Karras T, Aila T, Laine S, Lehtinen J (2018) Progressive growing of GANs for improved quality, stability, and variation. Int Conf Learn Represent"},{"key":"20101_CR49","doi-asserted-by":"crossref","unstructured":"Parmar G, Zhang R, Zhu JY (2022) On aliased resizing and surprising subtleties in gan evaluation. Proc IEEE\/CVF Conf Comput Vis Patt Recognit. pp. 
11410\u201311420","DOI":"10.1109\/CVPR52688.2022.01112"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-024-20101-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-024-20101-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-024-20101-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,5]],"date-time":"2025-09-05T22:10:03Z","timestamp":1757110203000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-024-20101-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,11]]},"references-count":49,"journal-issue":{"issue":"23","published-online":{"date-parts":[[2025,7]]}},"alternative-id":["20101"],"URL":"https:\/\/doi.org\/10.1007\/s11042-024-20101-5","relation":{},"ISSN":["1573-7721"],"issn-type":[{"type":"electronic","value":"1573-7721"}],"subject":[],"published":{"date-parts":[[2024,9,11]]},"assertion":[{"value":"14 April 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 July 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 August 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 September 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have declared that there is no 
conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of interest"}}]}}