{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,21]],"date-time":"2026-02-21T16:26:02Z","timestamp":1771691162088,"version":"3.50.1"},"reference-count":43,"publisher":"Springer Science and Business Media LLC","issue":"11","license":[{"start":{"date-parts":[[2023,9,16]],"date-time":"2023-09-16T00:00:00Z","timestamp":1694822400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,9,16]],"date-time":"2023-09-16T00:00:00Z","timestamp":1694822400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimed Tools Appl"],"DOI":"10.1007\/s11042-023-16822-8","type":"journal-article","created":{"date-parts":[[2023,9,16]],"date-time":"2023-09-16T04:01:32Z","timestamp":1694836892000},"page":"31341-31360","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["LSRF: localized and sparse receptive fields for linear facial expression synthesis based on global face context"],"prefix":"10.1007","volume":"83","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-2661-4855","authenticated-orcid":false,"given":"Arbish","family":"Akram","sequence":"first","affiliation":[]},{"given":"Nazar","family":"Khan","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,16]]},"reference":[{"key":"16822_CR1","unstructured":"Goodfellow, I, Pouget-Abadie, J, Mirza, M, Xu, B, Warde-Farley, D, Ozair, S, Courville, A, Bengio, Y (2014) Generative Adversarial Nets. In: Advances in neural information processing systems, pp 2672\u20132680"},{"key":"16822_CR2","unstructured":"Mirza, M, Osindero, S (2014) Conditional generative adversarial nets. arXiv:1411.1784"},{"key":"16822_CR3","unstructured":"Perarnau, G, van de Weijer, J, Raducanu, B, \u00c1lvarez, JM (2016) Invertible conditional GANs for image editing. arXiv:1611.06355"},{"key":"16822_CR4","unstructured":"Karras, T, Aila, T, Laine, S, Lehtinen, J (2018) Progressive growing of GANs for improved quality, stability, and variation. In: International conference on learning representations"},{"key":"16822_CR5","doi-asserted-by":"crossref","unstructured":"Chen, Y, Tai, Y, Liu, X, Shen, C, Yang, J (2018) FSRNet: End-to-End learning face super-resolution with facial priors. In: IEEE conference on computer vision and pattern recognition, pp 2492\u20132501","DOI":"10.1109\/CVPR.2018.00264"},{"key":"16822_CR6","doi-asserted-by":"publisher","first-page":"1219","DOI":"10.1109\/TIP.2020.3043093","volume":"30","author":"C Chen","year":"2020","unstructured":"Chen C, Gong D, Wang H, Li Z, Wong K-YK (2020) Learning spatial attention for face super-resolution. IEEE Trans Image Process 30:1219\u20131231","journal-title":"IEEE Trans Image Process"},{"key":"16822_CR7","doi-asserted-by":"publisher","first-page":"1184","DOI":"10.1109\/TIP.2023.3240845","volume":"32","author":"H Hou","year":"2023","unstructured":"Hou H, Xu J, Hou Y, Hu X, Wei B, Shen D (2023) Semi-cycled generative adversarial networks for real-world face super-resolution. IEEE Trans Image Process 32:1184\u20131199","journal-title":"IEEE Trans Image Process"},{"issue":"11","key":"16822_CR8","doi-asserted-by":"publisher","first-page":"5464","DOI":"10.1109\/TIP.2019.2916751","volume":"28","author":"Z He","year":"2019","unstructured":"He Z, Zuo W, Kan M, Shan S, Chen X (2019) AttGAN: Facial attribute editing by only changing what you want. IEEE Trans Image Process 28(11):5464\u20135478","journal-title":"IEEE Trans Image Process"},{"key":"16822_CR9","doi-asserted-by":"crossref","unstructured":"Liu, M, Ding, Y, Xia, M, Liu, X, Ding, E, Zuo, W, Wen, S (2019) STGAN: a unified selective transfer network for arbitrary image attribute editing. In: IEEE international conference on computer vision, pp 3673\u20133682","DOI":"10.1109\/CVPR.2019.00379"},{"key":"16822_CR10","doi-asserted-by":"crossref","unstructured":"Gao, Y, Wei, F, Bao, J, Gu, S, Chen, D, Wen, F, Lian, Z (2021) High-fidelity and arbitrary face editing. In: IEEE conference on computer vision and pattern recognition, pp 16115\u201316124","DOI":"10.1109\/CVPR46437.2021.01585"},{"key":"16822_CR11","doi-asserted-by":"crossref","unstructured":"Choi, Y, Choi, M, Kim, M, Ha, J-W, Kim, S, Choo, J (2018) StarGAN: unified generative adversarial networks for multi-domain image-to-image translation. In: IEEE conference on computer vision and pattern recognition, pp 8789\u20138797","DOI":"10.1109\/CVPR.2018.00916"},{"issue":"3","key":"16822_CR12","doi-asserted-by":"publisher","first-page":"698","DOI":"10.1007\/s11263-019-01210-3","volume":"128","author":"A Pumarola","year":"2020","unstructured":"Pumarola A, Agudo A, Martinez AM, Sanfeliu A, Moreno-Noguer F (2020) GANimation: one-shot anatomically consistent facial animation. Int J Comput Vis 128(3):698\u2013713","journal-title":"Int J Comput Vis"},{"key":"16822_CR13","doi-asserted-by":"crossref","unstructured":"Wu, R, Zhang, G, Lu, S, Chen, T (2020) Cascade EF-GAN: progressive facial expression editing with local focuses. In: IEEE conference on computer vision and pattern recognition, pp 5021\u20135030","DOI":"10.1109\/CVPR42600.2020.00507"},{"key":"16822_CR14","doi-asserted-by":"crossref","unstructured":"Akram, A, Khan, N (2023) US-GAN: on the importance of ultimate skip connection for facial expression synthesis. Multimedia Tools and Applications","DOI":"10.1007\/s11042-023-15268-2"},{"key":"16822_CR15","doi-asserted-by":"crossref","unstructured":"Akram, A, Khan, N (2023) SARGAN: Spatial attention-based residuals for facial expression manipulation. IEEE TCSVT","DOI":"10.1109\/TCSVT.2023.3255243"},{"issue":"5","key":"16822_CR16","doi-asserted-by":"publisher","first-page":"1433","DOI":"10.1007\/s11263-019-01256-3","volume":"128","author":"N Khan","year":"2020","unstructured":"Khan N, Akram A, Mahmood A, Ashraf S, Murtaza K (2020) Masked Linear Regression for Learning Local Receptive Fields for Facial Expression Synthesis. International Journal of Computer Vision 128(5):1433\u20131454","journal-title":"International Journal of Computer Vision"},{"key":"16822_CR17","doi-asserted-by":"crossref","unstructured":"Akram, A, Khan, N (2021) Pixel-based facial expression synthesis. In: International conference on pattern recognition, pp 9733\u20139739. IEEE","DOI":"10.1109\/ICPR48806.2021.9413065"},{"key":"16822_CR18","unstructured":"Pati, YC, Rezaiifar, R, Krishnaprasad, PS (1993) Orthogonal matching pursuit: recursive function approximat ion with applications to wavelet decomposition. In: Proceedings of 27th asilomar conference on signals, systems and computers, pp 40\u201344. IEEE"},{"issue":"12","key":"16822_CR19","doi-asserted-by":"publisher","first-page":"4655","DOI":"10.1109\/TIT.2007.909108","volume":"53","author":"JA Tropp","year":"2007","unstructured":"Tropp JA, Gilbert AC (2007) Signal recovery from random measurements via orthogonal matching pursuit. IEEE Trans Inf Theory 53(12):4655\u20134666","journal-title":"IEEE Trans Inf Theory"},{"key":"16822_CR20","doi-asserted-by":"crossref","unstructured":"Karras, T, Laine, S, Aila, T (2019) A style-based generator architecture for generative adversarial networks. In: IEEE conference on computer vision and pattern recognition, pp 4401\u20134410","DOI":"10.1109\/CVPR.2019.00453"},{"key":"16822_CR21","doi-asserted-by":"crossref","unstructured":"Isola, P, Zhu, J-Y, Zhou, T, Efros, AA (2017) Image-to-image translation with conditional adversarial networks. In: IEEE conference on computer vision and pattern recognition, pp 1125\u20131134","DOI":"10.1109\/CVPR.2017.632"},{"key":"16822_CR22","doi-asserted-by":"crossref","unstructured":"Zhang, Z, Song, Y, Qi, H (2017) Age progression\/regression by conditional adversarial autoencoder. In: IEEE conference on computer vision and pattern recognition, pp 5810\u20135818","DOI":"10.1109\/CVPR.2017.463"},{"key":"16822_CR23","doi-asserted-by":"publisher","first-page":"2402","DOI":"10.1007\/s11263-019-01284-z","volume":"128","author":"H-Y Lee","year":"2020","unstructured":"Lee H-Y, Tseng H-Y, Mao Q, Huang J-B, Lu Y-D, Singh M, Yang M-H (2020) DRIT++: Diverse image-to-image translation via disentangled representations. Int J Comput Vis 128:2402\u20132417","journal-title":"Int J Comput Vis"},{"issue":"1","key":"16822_CR24","doi-asserted-by":"publisher","first-page":"560","DOI":"10.1109\/TPAMI.2022.3155571","volume":"45","author":"Y Nirkin","year":"2022","unstructured":"Nirkin Y, Keller Y, Hassner T (2022) FSGANv2: improved subject agnostic face swapping and reenactment. IEEE Transactions on Pattern Analysis & Machine Intelligence 45(1):560\u2013575","journal-title":"IEEE Transactions on Pattern Analysis & Machine Intelligence"},{"issue":"4","key":"16822_CR25","doi-asserted-by":"publisher","first-page":"1986","DOI":"10.1109\/TAFFC.2022.3207007","volume":"13","author":"H Tang","year":"2022","unstructured":"Tang H, Sebe N (2022) Facial expression translation using landmark guided GANs. IEEE Trans Affect Comput 13(4):1986\u20131997","journal-title":"IEEE Trans Affect Comput"},{"key":"16822_CR26","doi-asserted-by":"crossref","unstructured":"Shen, W, Liu, R (2017) Learning residual images for face attribute manipulation. In: IEEE conference on computer vision and pattern recognition, pp 4030\u20134038","DOI":"10.1109\/CVPR.2017.135"},{"key":"16822_CR27","doi-asserted-by":"crossref","unstructured":"Chen, Y-C, Xu, X, Jia, J (2020) Domain Adaptive Image-to-image Translation. In: IEEE conference on computer vision and pattern recognition, pp 5274\u20135283","DOI":"10.1109\/CVPR42600.2020.00532"},{"key":"16822_CR28","unstructured":"Arjovsky, M, Chintala, S, Bottou, L (2017) Wasserstein generative adversarial networks. In: International conference on machine learning, pp 214\u2013223"},{"key":"16822_CR29","unstructured":"Gulrajani, I, Ahmed, F, Arjovsky, M, Dumoulin, V, Courville, A (2017) Improved training of wasserstein GANs. arXiv:1704.00028"},{"key":"16822_CR30","doi-asserted-by":"crossref","unstructured":"Zhang, H, Xu, T, Li, H, Zhang, S, Huang, X, Wang, X, Metaxas, D (2017) StackGAN: text to photo-realistic image synthesis with stacked generative adversarial networks. In: IEEE international conference on computer vision, pp 5907\u20135915","DOI":"10.1109\/ICCV.2017.629"},{"key":"16822_CR31","doi-asserted-by":"crossref","unstructured":"Patashnik, O, Wu, Z, Shechtman, E, Cohen-Or, D, Lischinski, D (2021) StyleCLIP: text-driven manipulation of stylegan imagery. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp 2085\u20132094","DOI":"10.1109\/ICCV48922.2021.00209"},{"key":"16822_CR32","doi-asserted-by":"crossref","unstructured":"Song, L, Lu, Z, He, R, Sun, Z, Tan, T (2018) Geometry guided adversarial facial expression synthesis. In: Proceedings of the 26th ACM international conference on multimedia, pp 627\u2013635","DOI":"10.1145\/3240508.3240612"},{"key":"16822_CR33","unstructured":"Qiao, F, Yao, N, Jiao, Z, Li, Z, Chen, H, Wang, H (2018) Geometry-contrastive generative adversarial network for facial expression synthesis. arXiv:1802.01822"},{"key":"16822_CR34","doi-asserted-by":"crossref","unstructured":"Ding, H, Sricharan, K, Chellappa, R (2018) ExprGAN: facial expression editing with controllable expression intensity. In: Proceedings of the AAAI conference on artificial intelligence, vol 32","DOI":"10.1609\/aaai.v32i1.12277"},{"key":"16822_CR35","doi-asserted-by":"crossref","unstructured":"Ling, J, Xue, H, Song, L, Yang, S, Xie, R, Gu, X (2020) Toward fine-grained facial expression manipulation. In: IEEE international conference on computer vision, pp 37\u201353. Springer","DOI":"10.1007\/978-3-030-58604-1_3"},{"key":"16822_CR36","doi-asserted-by":"crossref","unstructured":"Ronneberger, O, Fischer, P, Brox, T (2015) U-Net: convolutional networks for biomedical image segmentation. In: International conference on medical image computing and computer-assisted intervention, pp 234\u2013241. Springer","DOI":"10.1007\/978-3-319-24574-4_28"},{"issue":"3","key":"16822_CR37","first-page":"1443","volume":"32","author":"Y Xia","year":"2021","unstructured":"Xia Y, Zheng W, Wang Y, Yu H, Dong J, Wang F-Y (2021) Local and global perception generative adversarial network for facial expression synthesis. IEEE TCSVT 32(3):1443\u20131452","journal-title":"IEEE TCSVT"},{"key":"16822_CR38","doi-asserted-by":"crossref","unstructured":"d\u2019Apolito, S, Paudel, DP, Huang, Z, Romero, A, Van Gool, L (2021) GANmut: learning interpretable conditional space for gamut of emotions. In: IEEE conference on computer vision and pattern recognition, pp 568\u2013577","DOI":"10.1109\/CVPR46437.2021.00063"},{"key":"16822_CR39","doi-asserted-by":"crossref","unstructured":"Fabian Benitez-Quiroz, C, Srinivasan, R, Martinez, AM (2016) EmotioNet: an accurate, real-time algorithm for the automatic annotation of a million facial expressions in the wild. In: IEEE conference on computer vision and pattern recognition, pp 5562\u20135570","DOI":"10.1109\/CVPR.2016.600"},{"key":"16822_CR40","unstructured":"Megvii Inc: Face++ (2019). https:\/\/www.faceplusplus.com\/"},{"key":"16822_CR41","doi-asserted-by":"crossref","unstructured":"He, K, Zhang, X, Ren, S, Sun, J (2016) Deep Residual Learning for Image Recognition. In: IEEE Conference on Computer Vision and Pattern Recognition, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"issue":"15","key":"16822_CR42","doi-asserted-by":"publisher","first-page":"1454","DOI":"10.1073\/pnas.1322355111","volume":"111","author":"S Du","year":"2014","unstructured":"Du S, Tao Y, Martinez AM (2014) Compound facial expressions of emotion. Proceedings of the National Academy of Sciences 111(15):1454\u20131462","journal-title":"Proceedings of the National Academy of Sciences"},{"key":"16822_CR43","doi-asserted-by":"crossref","unstructured":"Chen, C, Li, X, Yang, L, Lin, X, Zhang, L, Wong, K-YK (2021) Progressive semantic-aware style transformation for blind face restoration. In: IEEE conference on computer vision and pattern recognition, pp 11896\u201311905","DOI":"10.1109\/CVPR46437.2021.01172"}],"container-title":["Multimedia Tools and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-16822-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11042-023-16822-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11042-023-16822-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,8]],"date-time":"2024-03-08T06:36:45Z","timestamp":1709879805000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11042-023-16822-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,9,16]]},"references-count":43,"journal-issue":{"issue":"11","published-online":{"date-parts":[[2024,3]]}},"alternative-id":["16822"],"URL":"https:\/\/doi.org\/10.1007\/s11042-023-16822-8","relation":{},"ISSN":["1573-7721"],"issn-type":[{"value":"1573-7721","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,9,16]]},"assertion":[{"value":"7 June 2022","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 June 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"31 August 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 September 2023","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflicts of interest"}}]}}