{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,3]],"date-time":"2025-07-03T06:30:44Z","timestamp":1751524244983},"reference-count":77,"publisher":"Springer Science and Business Media LLC","issue":"11","license":[{"start":{"date-parts":[[2022,9,16]],"date-time":"2022-09-16T00:00:00Z","timestamp":1663286400000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2022,9,16]],"date-time":"2022-09-16T00:00:00Z","timestamp":1663286400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Basic Research of Frontier Leading Technology of Jiangsu Province of China","award":["BK20192004C"],"award-info":[{"award-number":["BK20192004C"]}]},{"name":"Natural Science Foundation of Jiangsu Province of China","award":["BK20181269"],"award-info":[{"award-number":["BK20181269"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2023,11]]},"DOI":"10.1007\/s00371-022-02655-3","type":"journal-article","created":{"date-parts":[[2022,9,16]],"date-time":"2022-09-16T15:02:56Z","timestamp":1663340576000},"page":"5209-5227","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Two-stream inter-class variation enhancement network for facial expression recognition"],"prefix":"10.1007","volume":"39","author":[{"given":"Qian","family":"Jiang","sequence":"first","affiliation":[]},{"given":"Ziyu","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Feipeng","family":"Da","sequence":"additional","affiliation":[]},{"given":"Shaoyan","family":"Gai","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,9,16]]},"reference":[{"issue":"7","key":"2655_CR1","doi-asserted-by":"publisher","first-page":"2114","DOI":"10.1109\/TCSVT.2019.2912988","volume":"30","author":"Y Ji","year":"2020","unstructured":"Ji, Y., Yang, Y., Shen, F., Shen, H.T., Li, X.: A survey of human action analysis in HRI applications. IEEE Trans. Circuits Syst. Video Technol. 30(7), 2114\u20132128 (2020). https:\/\/doi.org\/10.1109\/TCSVT.2019.2912988","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"2655_CR2","doi-asserted-by":"publisher","DOI":"10.3390\/s18124270","author":"M Jeong","year":"2018","unstructured":"Jeong, M., Ko, B.C.: Driver\u2019s facial expression recognition in real-time for safe driving. Sensors (2018). https:\/\/doi.org\/10.3390\/s18124270","journal-title":"Sensors"},{"key":"2655_CR3","doi-asserted-by":"publisher","unstructured":"Zhang, F., Zhang, T., Mao, Q., Xu, C.: Joint pose and expression modeling for facial expression recognition. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3359\u20133368 (2018). https:\/\/doi.org\/10.1109\/CVPR.2018.00354","DOI":"10.1109\/CVPR.2018.00354"},{"issue":"3\u20134","key":"2655_CR4","doi-asserted-by":"publisher","first-page":"531","DOI":"10.1007\/s00779-019-01238-9","volume":"23","author":"F Kong","year":"2019","unstructured":"Kong, F.: Facial expression recognition method based on deep convolutional neural network combined with improved LBP features. Pers. Ubiquitous Comput. 23(3\u20134), 531\u2013539 (2019). https:\/\/doi.org\/10.1007\/s00779-019-01238-9","journal-title":"Pers. Ubiquitous Comput."},{"issue":"12","key":"2655_CR5","doi-asserted-by":"publisher","first-page":"2528","DOI":"10.1109\/TMM.2016.2598092","volume":"18","author":"T Zhang","year":"2016","unstructured":"Zhang, T., Zheng, W., Cui, Z., Zong, Y., Yan, J., Yan, K.: A deep neural network-driven feature learning method for multi-view facial expression recognition. IEEE Trans. Multimed. 18(12), 2528\u20132536 (2016). https:\/\/doi.org\/10.1109\/TMM.2016.2598092","journal-title":"IEEE Trans. Multimed."},{"key":"2655_CR6","unstructured":"Sun, Y., Chen, Y., Wang, X., Tang, X.: Deep learning face representation by joint identification-verification. In: Proceedings of the 27th International Conference on Neural Information Processing Systems\u2014Volume 2. NIPS\u201914, pp. 1988\u20131996. MIT Press, Cambridge (2014)"},{"issue":"5","key":"2655_CR7","doi-asserted-by":"publisher","first-page":"2439","DOI":"10.1109\/TIP.2018.2886767","volume":"28","author":"Y Li","year":"2019","unstructured":"Li, Y., Zeng, J., Shan, S., Chen, X.: Occlusion aware facial expression recognition using CNN with attention mechanism. IEEE Trans. Image Process. 28(5), 2439\u20132450 (2019). https:\/\/doi.org\/10.1109\/TIP.2018.2886767","journal-title":"IEEE Trans. Image Process."},{"key":"2655_CR8","doi-asserted-by":"publisher","first-page":"2918","DOI":"10.1007\/s10489-021-02575-0","volume":"52","author":"W Zou","year":"2021","unstructured":"Zou, W., Zhang, D., Lee, D.J.: A new multi-feature fusion based convolutional neural network for facial expression recognition. Appl. Intell. 52, 2918\u20132929 (2021)","journal-title":"Appl. Intell."},{"key":"2655_CR9","doi-asserted-by":"publisher","unstructured":"Lin, F., Hong, R., Zhou, W., Li, H.: Facial expression recognition with data augmentation and compact feature learning. In: 2018 25th IEEE International Conference on Image Processing (ICIP), pp. 1957\u20131961 (2018). https:\/\/doi.org\/10.1109\/ICIP.2018.8451039","DOI":"10.1109\/ICIP.2018.8451039"},{"key":"2655_CR10","doi-asserted-by":"publisher","unstructured":"Li, S., Deng, W., Du, J.: Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2584\u20132593 (2017). https:\/\/doi.org\/10.1109\/CVPR.2017.277","DOI":"10.1109\/CVPR.2017.277"},{"key":"2655_CR11","doi-asserted-by":"publisher","unstructured":"Goodfellow, I.J., Erhan, D., Luc Carrier, P., Courville, A.: Challenges in representation learning: a report on three machine learning contests. Neural Netw. 64, 59\u201363 (2015). https:\/\/doi.org\/10.1016\/j.neunet.2014.09.005. (Special Issue on \u201cDeep Learning of Representations\u201d)","DOI":"10.1016\/j.neunet.2014.09.005"},{"issue":"1","key":"2655_CR12","doi-asserted-by":"publisher","first-page":"356","DOI":"10.1109\/TIP.2018.2868382","volume":"28","author":"S Li","year":"2019","unstructured":"Li, S., Deng, W.: Reliable crowdsourcing and deep locality-preserving learning for unconstrained facial expression recognition. IEEE Trans. Image Process. 28(1), 356\u2013370 (2019). https:\/\/doi.org\/10.1109\/TIP.2018.2868382","journal-title":"IEEE Trans. Image Process."},{"key":"2655_CR13","doi-asserted-by":"publisher","unstructured":"Lucey, P., Cohn, J.F., Kanade, T., Saragih, J., Ambadar, Z., Matthews, I.: The extended Cohn-Kanade dataset (ck+): a complete dataset for action unit and emotion-specified expression. In: 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition\u2014Workshops, pp. 94\u2013101 (2010). https:\/\/doi.org\/10.1109\/CVPRW.2010.5543262","DOI":"10.1109\/CVPRW.2010.5543262"},{"key":"2655_CR14","doi-asserted-by":"publisher","unstructured":"Taini, M., Zhao, G., Li, S.Z., Pietikainen, M.: Facial expression recognition from near-infrared video sequences. In: 2008 19th International Conference on Pattern Recognition, pp. 1\u20134 (2008). https:\/\/doi.org\/10.1109\/ICPR.2008.4761697","DOI":"10.1109\/ICPR.2008.4761697"},{"key":"2655_CR15","doi-asserted-by":"publisher","first-page":"82","DOI":"10.1016\/j.neucom.2019.05.005","volume":"355","author":"J Shao","year":"2019","unstructured":"Shao, J., Qian, Y.: Three convolutional neural network models for facial expression recognition in the wild. Neurocomputing 355, 82\u201392 (2019). https:\/\/doi.org\/10.1016\/j.neucom.2019.05.005","journal-title":"Neurocomputing"},{"key":"2655_CR16","unstructured":"Zhao, S., Cai, H., Liu, H., Zhang, J., Chen, S.: Feature selection mechanism in CNNS for facial expression recognition. In: BMVC (2018)"},{"issue":"4","key":"2655_CR17","doi-asserted-by":"publisher","first-page":"898","DOI":"10.1109\/TCDS.2020.3034807","volume":"13","author":"H Zhang","year":"2021","unstructured":"Zhang, H., Su, W., Yu, J., Wang, Z.: Identity-expression dual branch network for facial expression recognition. IEEE Trans. Cogn. Dev. Syst. 13(4), 898\u2013911 (2021). https:\/\/doi.org\/10.1109\/TCDS.2020.3034807","journal-title":"IEEE Trans. Cogn. Dev. Syst."},{"issue":"2","key":"2655_CR18","doi-asserted-by":"publisher","first-page":"405","DOI":"10.1007\/s00371-019-01630-9","volume":"36","author":"A Agrawal","year":"2020","unstructured":"Agrawal, A., Mittal, N.: Using CNN for facial expression recognition: a study of the effects of kernel size and number of filters on accuracy. Vis. Comput. 36(2), 405\u2013412 (2020)","journal-title":"Vis. Comput."},{"key":"2655_CR19","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2019.113102","volume":"145","author":"Z Wang","year":"2020","unstructured":"Wang, Z.: A new clustering method based on morphological operations. Expert Syst. Appl. 145, 113102 (2020)","journal-title":"Expert Syst. Appl."},{"key":"2655_CR20","doi-asserted-by":"publisher","DOI":"10.3390\/s21093046","author":"S Minaee","year":"2021","unstructured":"Minaee, S., Minaei, M., Abdolrashidi, A.: Deep-emotion: facial expression recognition using attentional convolutional network. Sensors (2021). https:\/\/doi.org\/10.3390\/s21093046","journal-title":"Sensors"},{"key":"2655_CR21","doi-asserted-by":"crossref","unstructured":"Liang, X., Xu, L., Zhang, W., Zhang, Y., Liu, J., Liu, Z.: A convolution-transformer dual branch network for head-pose and occlusion facial expression recognition. Vis. Comput. 1\u201314 (2022)","DOI":"10.1007\/s00371-022-02413-5"},{"key":"2655_CR22","doi-asserted-by":"crossref","unstructured":"Saurav, S., Gidde, P., Saini, R., Singh, S.: Dual integrated convolutional neural network for real-time facial expression recognition in the wild. Vis. Comput. 1\u201314 (2021)","DOI":"10.1007\/s00371-021-02069-7"},{"issue":"8","key":"2655_CR23","doi-asserted-by":"publisher","first-page":"1635","DOI":"10.1007\/s00371-019-01759-7","volume":"36","author":"X Liu","year":"2020","unstructured":"Liu, X., Zhou, F.: Improved curriculum learning using SSM for facial expression recognition. Vis. Comput. 36(8), 1635\u20131649 (2020)","journal-title":"Vis. Comput."},{"key":"2655_CR24","doi-asserted-by":"publisher","first-page":"38528","DOI":"10.1109\/ACCESS.2020.2964752","volume":"8","author":"G Zhao","year":"2020","unstructured":"Zhao, G., Yang, H., Yu, M.: Expression recognition method based on a lightweight convolutional neural network. IEEE Access 8, 38528\u201338537 (2020). https:\/\/doi.org\/10.1109\/ACCESS.2020.2964752","journal-title":"IEEE Access"},{"key":"2655_CR25","doi-asserted-by":"publisher","first-page":"64827","DOI":"10.1109\/ACCESS.2019.2917266","volume":"7","author":"M-I Georgescu","year":"2019","unstructured":"Georgescu, M.-I., Ionescu, R.T., Popescu, M.: Local learning with deep and handcrafted features for facial expression recognition. IEEE Access 7, 64827\u201364836 (2019). https:\/\/doi.org\/10.1109\/ACCESS.2019.2917266","journal-title":"IEEE Access"},{"issue":"1","key":"2655_CR26","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1109\/TMM.2018.2844085","volume":"21","author":"S Xie","year":"2019","unstructured":"Xie, S., Hu, H.: Facial expression recognition using hierarchical features with deep comprehensive multipatches aggregation convolutional neural networks. IEEE Trans. Multimed. 21(1), 211\u2013220 (2019). https:\/\/doi.org\/10.1109\/TMM.2018.2844085","journal-title":"IEEE Trans. Multimed."},{"issue":"5","key":"2655_CR27","doi-asserted-by":"publisher","first-page":"3211","DOI":"10.1007\/s11227-018-2554-8","volume":"76","author":"H Wang","year":"2020","unstructured":"Wang, H., Wei, S., Fang, B.: Facial expression recognition using iterative fusion of MO-HOG and deep features. J. Supercomput. 76(5), 3211\u20133221 (2020)","journal-title":"J. Supercomput."},{"key":"2655_CR28","doi-asserted-by":"publisher","DOI":"10.3390\/s20041087","author":"MN Riaz","year":"2020","unstructured":"Riaz, M.N., Shen, Y., Sohail, M., Guo, M.: exnet: an efficient approach for emotion recognition in the wild. Sensors (2020). https:\/\/doi.org\/10.3390\/s20041087","journal-title":"Sensors"},{"issue":"3","key":"2655_CR29","doi-asserted-by":"publisher","first-page":"499","DOI":"10.1007\/s00371-019-01636-3","volume":"36","author":"D Liang","year":"2020","unstructured":"Liang, D., Liang, H., Yu, Z., Zhang, Y.: Deep convolutional BiLSTM fusion network for facial expression recognition. Vis. Comput. 36(3), 499\u2013508 (2020)","journal-title":"Vis. Comput."},{"key":"2655_CR30","doi-asserted-by":"publisher","first-page":"7383","DOI":"10.1109\/ACCESS.2020.2963913","volume":"8","author":"Y Gan","year":"2020","unstructured":"Gan, Y., Chen, J., Yang, Z., Xu, L.: Multiple attention network for facial expression recognition. IEEE Access 8, 7383\u20137393 (2020)","journal-title":"IEEE Access"},{"key":"2655_CR31","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2020.3027340","author":"W Chen","year":"2020","unstructured":"Chen, W., Zhang, D., Li, M., Lee, D.-J.: Stcam: spatial-temporal and channel attention module for dynamic facial expression recognition. IEEE Trans. Affect. Comput. (2020). https:\/\/doi.org\/10.1109\/TAFFC.2020.3027340","journal-title":"IEEE Trans. Affect. Comput."},{"key":"2655_CR32","doi-asserted-by":"publisher","first-page":"378","DOI":"10.1016\/j.neucom.2019.11.127","volume":"444","author":"X Sun","year":"2021","unstructured":"Sun, X., Xia, P., Ren, F.: Multi-attention based deep neural network with hybrid features for dynamic sequential facial expression recognition. Neurocomputing 444, 378\u2013389 (2021). https:\/\/doi.org\/10.1016\/j.neucom.2019.11.127","journal-title":"Neurocomputing"},{"key":"2655_CR33","doi-asserted-by":"publisher","first-page":"340","DOI":"10.1016\/j.neucom.2020.06.014","volume":"411","author":"J Li","year":"2020","unstructured":"Li, J., Jin, K., Zhou, D., Kubota, N., Ju, Z.: Attention mechanism-based CNN for facial expression recognition. Neurocomputing 411, 340\u2013350 (2020). https:\/\/doi.org\/10.1016\/j.neucom.2020.06.014","journal-title":"Neurocomputing"},{"issue":"3","key":"2655_CR34","doi-asserted-by":"publisher","first-page":"1431","DOI":"10.1109\/TCSVT.2021.3073558","volume":"32","author":"M Huang","year":"2022","unstructured":"Huang, M., Zhang, X., Lan, X., Wang, H., Tang, Y.: Convolution by multiplication: accelerated two-stream Fourier domain convolutional neural network for facial expression recognition. IEEE Trans. Circuits Syst. Video Technol. 32(3), 1431\u20131442 (2022). https:\/\/doi.org\/10.1109\/TCSVT.2021.3073558","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"3","key":"2655_CR35","doi-asserted-by":"publisher","first-page":"1473","DOI":"10.1109\/TSMC.2019.2897330","volume":"51","author":"M Wu","year":"2021","unstructured":"Wu, M., Su, W., Chen, L., Liu, Z., Cao, W., Hirota, K.: Weight-adapted convolution neural network for facial expression recognition in human\u2013robot interaction. IEEE Trans. Syst. Man Cybern. Syst. 51(3), 1473\u20131484 (2021). https:\/\/doi.org\/10.1109\/TSMC.2019.2897330","journal-title":"IEEE Trans. Syst. Man Cybern. Syst."},{"issue":"3","key":"2655_CR36","doi-asserted-by":"publisher","first-page":"1443","DOI":"10.1109\/TCSVT.2021.3074032","volume":"32","author":"Y Xia","year":"2022","unstructured":"Xia, Y., Zheng, W., Wang, Y., Yu, H., Dong, J., Wang, F.-Y.: Local and global perception generative adversarial network for facial expression synthesis. IEEE Trans. Circuits Syst. Video Technol. 32(3), 1443\u20131452 (2022). https:\/\/doi.org\/10.1109\/TCSVT.2021.3074032","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"2655_CR37","doi-asserted-by":"publisher","first-page":"6544","DOI":"10.1109\/TIP.2021.3093397","volume":"30","author":"Z Zhao","year":"2021","unstructured":"Zhao, Z., Liu, Q., Wang, S.: Learning deep global multi-scale and local attention features for facial expression recognition in the wild. IEEE Trans. Image Process. 30, 6544\u20136556 (2021). https:\/\/doi.org\/10.1109\/TIP.2021.3093397","journal-title":"IEEE Trans. Image Process."},{"issue":"2","key":"2655_CR38","doi-asserted-by":"publisher","first-page":"263","DOI":"10.1007\/s11760-020-01753-w","volume":"15","author":"D Zhu","year":"2021","unstructured":"Zhu, D., Tian, G., Zhu, L., Wang, W., Wang, B., Li, C.: Lkrnet: a dual-branch network based on local key regions for facial expression recognition. SIViP 15(2), 263\u2013270 (2021)","journal-title":"SIViP"},{"key":"2655_CR39","doi-asserted-by":"publisher","unstructured":"Schroff, F., Kalenichenko, D., Philbin, J.: Facenet: a unified embedding for face recognition and clustering. In: 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 815\u2013823 (2015). https:\/\/doi.org\/10.1109\/CVPR.2015.7298682","DOI":"10.1109\/CVPR.2015.7298682"},{"key":"2655_CR40","doi-asserted-by":"publisher","first-page":"549","DOI":"10.1007\/s10489-020-01855-5","volume":"51","author":"J Shao","year":"2020","unstructured":"Shao, J., Cheng, Q.: E-FCNN for tiny facial expression recognition. Appl. Intell. 51, 549\u2013559 (2020)","journal-title":"Appl. Intell."},{"key":"2655_CR41","doi-asserted-by":"publisher","DOI":"10.1016\/j.image.2021.116321","volume":"96","author":"K-Y Tsai","year":"2021","unstructured":"Tsai, K.-Y., Tsai, Y.-W., Lee, Y.-C., Ding, J.-J., Chang, R.Y.: Frontalization and adaptive exponential ensemble rule for deep-learning-based facial expression recognition system. Signal Process. Image Commun. 96, 116321 (2021). https:\/\/doi.org\/10.1016\/j.image.2021.116321","journal-title":"Signal Process. Image Commun."},{"key":"2655_CR42","doi-asserted-by":"publisher","unstructured":"Liu, X., Jin, L., Han, X., Lu, J., You, J., Kong, L.: Identity-aware facial expression recognition in compressed video. In: 2020 25th International Conference on Pattern Recognition (ICPR), pp. 7508\u20137514 (2021). https:\/\/doi.org\/10.1109\/ICPR48806.2021.9412820","DOI":"10.1109\/ICPR48806.2021.9412820"},{"key":"2655_CR43","doi-asserted-by":"crossref","unstructured":"Zeng, J., Shan, S., Chen, X.: Facial expression recognition with inconsistently annotated datasets. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 222\u2013237 (2018)","DOI":"10.1007\/978-3-030-01261-8_14"},{"key":"2655_CR44","doi-asserted-by":"publisher","unstructured":"Wu, W., Yin, Y., Wang, Y., Wang, X., Xu, D.: Facial expression recognition for different pose faces based on special landmark detection. In: 2018 24th International Conference on Pattern Recognition (ICPR), pp. 1524\u20131529 (2018). https:\/\/doi.org\/10.1109\/ICPR.2018.8545725","DOI":"10.1109\/ICPR.2018.8545725"},{"key":"2655_CR45","doi-asserted-by":"publisher","unstructured":"Ming, Z., Chazalon, J., Muzzamil\u00a0Luqman, M., Visani, M., Burie, J.-C.: Facelivenet: end-to-end networks combining face verification with interactive facial expression-based liveness detection. In: 2018 24th International Conference on Pattern Recognition (ICPR), pp. 3507\u20133512 (2018). https:\/\/doi.org\/10.1109\/ICPR.2018.8545274","DOI":"10.1109\/ICPR.2018.8545274"},{"key":"2655_CR46","doi-asserted-by":"publisher","unstructured":"Li, Y., Zeng, J., Shan, S., Chen, X.: Patch-gated CNN for occlusion-aware facial expression recognition. In: 2018 24th International Conference on Pattern Recognition (ICPR), pp. 2209\u20132214 (2018). https:\/\/doi.org\/10.1109\/ICPR.2018.8545853","DOI":"10.1109\/ICPR.2018.8545853"},{"key":"2655_CR47","unstructured":"Wang, W., Sun, Q., Chen, T., Cao, C., Zheng, Z., Xu, G., Qiu, H., Fu, Y.: A Fine-Grained Facial Expression Database for End-to-End Multi-Pose Facial Expression Recognition (2019)"},{"key":"2655_CR48","doi-asserted-by":"publisher","DOI":"10.1007\/s00371-022-02483-5","author":"C Liu","year":"2022","unstructured":"Liu, C., Liu, X., Chen, C., Wang, Q.: Soft thresholding squeeze-and-excitation network for pose-invariant facial expression recognition. Vis. Comput. (2022). https:\/\/doi.org\/10.1007\/s00371-022-02483-5","journal-title":"Vis. Comput."},{"issue":"6","key":"2655_CR49","doi-asserted-by":"publisher","first-page":"2359","DOI":"10.1109\/TCSVT.2020.3024201","volume":"31","author":"S Xie","year":"2021","unstructured":"Xie, S., Hu, H., Chen, Y.: Facial expression recognition with two-branch disentangled generative adversarial network. IEEE Trans. Circuits Syst. Video Technol. 31(6), 2359\u20132371 (2021). https:\/\/doi.org\/10.1109\/TCSVT.2020.3024201","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"2655_CR50","doi-asserted-by":"publisher","unstructured":"Ali, K., Hughes, C.E.: Facial expression recognition by using a disentangled identity-invariant expression representation. In: 2020 25th International Conference on Pattern Recognition (ICPR), pp. 9460\u20139467 (2021). https:\/\/doi.org\/10.1109\/ICPR48806.2021.9412172","DOI":"10.1109\/ICPR48806.2021.9412172"},{"key":"2655_CR51","doi-asserted-by":"publisher","unstructured":"Yang, H., Ciftci, U., Yin, L.: Facial expression recognition by de-expression residue learning. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2168\u20132177 (2018). https:\/\/doi.org\/10.1109\/CVPR.2018.00231","DOI":"10.1109\/CVPR.2018.00231"},{"issue":"5","key":"2655_CR52","doi-asserted-by":"publisher","first-page":"2787","DOI":"10.1109\/TCYB.2019.2925095","volume":"51","author":"W Xie","year":"2021","unstructured":"Xie, W., Shen, L., Duan, J.: Adaptive weighting of handcrafted feature losses for facial expression recognition. IEEE Trans. Cybern. 51(5), 2787\u20132800 (2021). https:\/\/doi.org\/10.1109\/TCYB.2019.2925095","journal-title":"IEEE Trans. Cybern."},{"key":"2655_CR53","doi-asserted-by":"publisher","unstructured":"Tian, Y., Wen, Z., Xie, W., Zhang, X., Shen, L., Duan, J.: Outlier-suppressed triplet loss with adaptive class-aware margins for facial expression recognition. In: 2019 IEEE International Conference on Image Processing (ICIP), pp. 46\u201350 (2019). https:\/\/doi.org\/10.1109\/ICIP.2019.8802918","DOI":"10.1109\/ICIP.2019.8802918"},{"issue":"2","key":"2655_CR54","doi-asserted-by":"publisher","first-page":"391","DOI":"10.1007\/s00371-019-01627-4","volume":"36","author":"K Li","year":"2020","unstructured":"Li, K., Jin, Y., Akram, M.W., Han, R., Chen, J.: Facial expression recognition with convolutional neural networks via a new face cropping and rotation strategy. Vis. Comput. 36(2), 391\u2013404 (2020)","journal-title":"Vis. Comput."},{"key":"2655_CR55","doi-asserted-by":"publisher","unstructured":"Cai, J., Meng, Z., Khan, A.S., Li, Z., O\u2019Reilly, J., Tong, Y.: Island loss for learning discriminative features in facial expression recognition. In: 2018 13th IEEE International Conference on Automatic Face Gesture Recognition (FG 2018), pp. 302\u2013309 (2018). https:\/\/doi.org\/10.1109\/FG.2018.00051","DOI":"10.1109\/FG.2018.00051"},{"key":"2655_CR56","doi-asserted-by":"publisher","unstructured":"Meng, Z., Liu, P., Cai, J., Han, S., Tong, Y.: Identity-aware convolutional neural network for facial expression recognition. In: 2017 12th IEEE International Conference on Automatic Face Gesture Recognition (FG 2017), pp. 558\u2013565 (2017). https:\/\/doi.org\/10.1109\/FG.2017.140","DOI":"10.1109\/FG.2017.140"},{"key":"2655_CR57","doi-asserted-by":"crossref","unstructured":"Li, S., Deng, W., Du, J.: Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. In: 2017 IEEE Conference On Computer Vision and Pattern Recognition (CVPR), pp. 2584\u20132593. IEEE (2017)","DOI":"10.1109\/CVPR.2017.277"},{"key":"2655_CR58","doi-asserted-by":"publisher","unstructured":"Farzaneh, A.H., Qi, X.: Discriminant distribution-agnostic loss for facial expression recognition in the wild. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 1631\u20131639 (2020). https:\/\/doi.org\/10.1109\/CVPRW50498.2020.00211","DOI":"10.1109\/CVPRW50498.2020.00211"},{"issue":"5","key":"2655_CR59","doi-asserted-by":"publisher","first-page":"3178","DOI":"10.1109\/TCSVT.2021.3103760","volume":"32","author":"Y Li","year":"2022","unstructured":"Li, Y., Lu, Y., Chen, B., Zhang, Z., Li, J., Lu, G., Zhang, D.: Learning informative and discriminative features for facial expression recognition in the wild. IEEE Trans. Circuits Syst. Video Technol. 32(5), 3178\u20133189 (2022). https:\/\/doi.org\/10.1109\/TCSVT.2021.3103760","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"issue":"3","key":"2655_CR60","doi-asserted-by":"publisher","first-page":"483","DOI":"10.1007\/s00371-019-01635-4","volume":"36","author":"F An","year":"2020","unstructured":"An, F., Liu, Z.: Facial expression recognition algorithm based on parameter adaptive initialization of CNN and LSTM. Vis. Comput. 36(3), 483\u2013498 (2020)","journal-title":"Vis. Comput."},{"key":"2655_CR61","doi-asserted-by":"publisher","unstructured":"Zhou, B., Cui, Q., Wei, X.-S., Chen, Z.-M.: BBN: bilateral-branch network with cumulative learning for long-tailed visual recognition. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9716\u20139725 (2020). https:\/\/doi.org\/10.1109\/CVPR42600.2020.00974","DOI":"10.1109\/CVPR42600.2020.00974"},{"key":"2655_CR62","doi-asserted-by":"publisher","unstructured":"Li, Y., Gao, Y., Chen, B., Zhang, Z., Lu, G., Zhang, D.: Self-supervised exclusive-inclusive interactive learning for multi-label facial expression recognition in the wild. IEEE Trans. Circuits Syst. Video Technol. 32(5), 3190\u20133202 (2022). https:\/\/doi.org\/10.1109\/TCSVT.2021.3103782","DOI":"10.1109\/TCSVT.2021.3103782"},{"issue":"5","key":"2655_CR63","doi-asserted-by":"publisher","first-page":"1100","DOI":"10.1109\/TPAMI.2016.2637331","volume":"40","author":"Z Xu","year":"2018","unstructured":"Xu, Z., Huang, S., Zhang, Y., Tao, D.: Webly-supervised fine-grained visual categorization via deep domain adaptation. IEEE Trans. Pattern Anal. Mach. Intell. 40(5), 1100\u20131113 (2018). https:\/\/doi.org\/10.1109\/TPAMI.2016.2637331","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"2655_CR64","doi-asserted-by":"publisher","unstructured":"Zhong, L., Bai, C., Li, J., Chen, T., Li, S., Liu, Y.: A graph-structured representation with BRNN for static-based facial expression recognition. In: 2019 14th IEEE International Conference on Automatic Face Gesture Recognition (FG 2019), pp. 1\u20135 (2019). https:\/\/doi.org\/10.1109\/FG.2019.8756615","DOI":"10.1109\/FG.2019.8756615"},{"issue":"1","key":"2655_CR65","doi-asserted-by":"publisher","first-page":"97","DOI":"10.1007\/s00371-018-1585-8","volume":"36","author":"I Gogi\u0107","year":"2020","unstructured":"Gogi\u0107, I., Manhart, M., Pand\u017ei\u0107, I.S., Ahlberg, J.: Fast facial expression recognition using local binary features and shallow neural networks. Vis. Comput. 36(1), 97\u2013112 (2020)","journal-title":"Vis. Comput."},{"key":"2655_CR66","doi-asserted-by":"publisher","first-page":"50","DOI":"10.1016\/j.neucom.2018.07.028","volume":"317","author":"Z Yu","year":"2018","unstructured":"Yu, Z., Liu, G., Liu, Q., Deng, J.: Spatio-temporal convolutional features with nested LSTM for facial expression recognition. Neurocomputing 317, 50\u201357 (2018). https:\/\/doi.org\/10.1016\/j.neucom.2018.07.028","journal-title":"Neurocomputing"},{"key":"2655_CR67","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1007\/978-3-030-01234-2_1","volume-title":"Computer Vision\u2014ECCV 2018","author":"S Woo","year":"2018","unstructured":"Woo, S., Park, J., Lee, J.-Y., Kweon, I.S.: CBAM: convolutional block attention module. In: Ferrari, V., Hebert, M., Sminchisescu, C., Weiss, Y. (eds.) Computer Vision\u2014ECCV 2018, pp. 3\u201319. Springer, Cham (2018)"},{"key":"2655_CR68","doi-asserted-by":"publisher","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"2655_CR69","doi-asserted-by":"publisher","unstructured":"Cui, Y., Jia, M., Lin, T.-Y., Song, Y., Belongie, S.: Class-balanced loss based on effective number of samples. In: 2019 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 9260\u20139269 (2019). https:\/\/doi.org\/10.1109\/CVPR.2019.00949","DOI":"10.1109\/CVPR.2019.00949"},{"key":"2655_CR70","doi-asserted-by":"publisher","first-page":"4057","DOI":"10.1109\/TIP.2019.2956143","volume":"29","author":"K Wang","year":"2020","unstructured":"Wang, K., Peng, X., Yang, J., Meng, D., Qiao, Y.: Region attention networks for pose and occlusion robust facial expression recognition. IEEE Trans. Image Process. 29, 4057\u20134069 (2020). https:\/\/doi.org\/10.1109\/TIP.2019.2956143","journal-title":"IEEE Trans. Image Process."},{"key":"2655_CR71","doi-asserted-by":"publisher","unstructured":"Qian, D., Zhou, L., Wang, Y., Wu, C.: Expression recognition based on multiple feature fusion-based convolutional neural network. In: 2021 IEEE Conference on Cognitive and Computational Aspects of Situation Management (CogSIMA), pp. 66\u201372 (2021). https:\/\/doi.org\/10.1109\/CogSIMA51574.2021.9475948","DOI":"10.1109\/CogSIMA51574.2021.9475948"},{"key":"2655_CR72","unstructured":"Florea, C., Florea, L.M., Badea, M.-S., Vertan, C., Racoviteanu, A.: Annealed label transfer for face expression recognition. In: BMVC (2019)"},{"issue":"2","key":"2655_CR73","doi-asserted-by":"publisher","first-page":"318","DOI":"10.1109\/TPAMI.2018.2858826","volume":"42","author":"T-Y Lin","year":"2020","unstructured":"Lin, T.-Y., Goyal, P., Girshick, R., He, K., Dollr, P.: Focal loss for dense object detection. IEEE Trans. Pattern Anal. Mach. Intell. 42(2), 318\u2013327 (2020). https:\/\/doi.org\/10.1109\/TPAMI.2018.2858826","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"5","key":"2655_CR74","doi-asserted-by":"publisher","first-page":"1213","DOI":"10.1109\/TCSS.2020.3013938","volume":"8","author":"T Ni","year":"2021","unstructured":"Ni, T., Zhang, C., Gu, X.: Transfer model collaborating metric learning and dictionary learning for cross-domain facial expression recognition. IEEE Trans. Comput. Soc. Syst. 8(5), 1213\u20131222 (2021). https:\/\/doi.org\/10.1109\/TCSS.2020.3013938","journal-title":"IEEE Trans. Comput. Soc. Syst."},{"key":"2655_CR75","doi-asserted-by":"publisher","unstructured":"Chen, T., Pu, T., Wu, H., Xie, Y., Liu, L., Lin, L.: Cross-domain facial expression recognition: A unified evaluation benchmark and adversarial graph learning. IEEE Trans. Pattern Anal. Mach. Intell. (2021). https:\/\/doi.org\/10.1109\/TPAMI.2021.3131222","DOI":"10.1109\/TPAMI.2021.3131222"},{"key":"2655_CR76","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2021.3136606","author":"Y Ji","year":"2021","unstructured":"Ji, Y., Hu, Y., Yang, Y., Shen, H.T.: Region attention enhanced unsupervised cross-domain facial emotion recognition. IEEE Trans. Knowl. Data Eng. (2021). https:\/\/doi.org\/10.1109\/TKDE.2021.3136606","journal-title":"IEEE Trans. Knowl. Data Eng."},{"key":"2655_CR77","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3141604","author":"Y Li","year":"2022","unstructured":"Li, Y., Zhang, Z., Chen, B., Lu, G., Zhang, D.: Deep margin-sensitive representation learning for cross-domain facial expression recognition. IEEE Trans. Multim. (2022). https:\/\/doi.org\/10.1109\/TMM.2022.3141604","journal-title":"IEEE Trans. Multim."}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-022-02655-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-022-02655-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-022-02655-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,27]],"date-time":"2023-10-27T15:04:28Z","timestamp":1698419068000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-022-02655-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,9,16]]},"references-count":77,"journal-issue":{"issue":"11","published-print":{"date-parts":[[2023,11]]}},"alternative-id":["2655"],"URL":"https:\/\/doi.org\/10.1007\/s00371-022-02655-3","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"value":"0178-2789","type":"print"},{"value":"1432-2315","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022,9,16]]},"assertion":[{"value":"18 August 2022","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 September 2022","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}