{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T21:16:10Z","timestamp":1773868570959,"version":"3.50.1"},"reference-count":62,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,11,13]],"date-time":"2024-11-13T00:00:00Z","timestamp":1731456000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2024,11,13]],"date-time":"2024-11-13T00:00:00Z","timestamp":1731456000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62166008"],"award-info":[{"award-number":["62166008"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Complex Intell. 
Syst."],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1007\/s40747-024-01608-8","type":"journal-article","created":{"date-parts":[[2024,11,13]],"date-time":"2024-11-13T05:01:42Z","timestamp":1731474102000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["LCANet: a model for analysis of students real-time sentiment by integrating attention mechanism and joint loss function"],"prefix":"10.1007","volume":"11","author":[{"given":"Pengyun","family":"Hu","sequence":"first","affiliation":[]},{"given":"Xianpiao","family":"Tang","sequence":"additional","affiliation":[]},{"given":"Liu","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Chuijian","family":"Kong","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3629-3672","authenticated-orcid":false,"given":"Daoxun","family":"Xia","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,13]]},"reference":[{"issue":"22","key":"1608_CR1","doi-asserted-by":"publisher","first-page":"31581","DOI":"10.1007\/s11042-019-07959-6","volume":"78","author":"G Yolcu","year":"2019","unstructured":"Yolcu G, Oztel I, Kazan S, Oz C, Palaniappan K, Lever TE, Bunyak F (2019) Facial expression recognition for monitoring neurological disorders based on convolutional neural network. Multimed Tools Appl 78(22):31581\u201331603. https:\/\/doi.org\/10.1007\/s11042-019-07959-6","journal-title":"Multimed Tools Appl"},{"issue":"6","key":"1608_CR2","doi-asserted-by":"publisher","first-page":"6927","DOI":"10.1007\/s40747-023-01338-3","volume":"9","author":"K Zaman","year":"2023","unstructured":"Zaman K, Zhaoyun S, Shah B, Hussain T, Shah SM, Ali F, Khan US (2023) A novel driver emotion recognition system based on deep ensemble classification. Complex Intell Syst 9(6):6927\u20136952. 
https:\/\/doi.org\/10.1007\/s40747-023-01338-3","journal-title":"Complex Intell Syst"},{"key":"1608_CR3","doi-asserted-by":"publisher","unstructured":"Liu T, Liu Z, Chai Y, Wang J, Yuanyi W (2021) Agent affective computing in human-computer interaction. J Image Graph 26(12):2767\u20132777. https:\/\/doi.org\/10.11834\/jig.200498","DOI":"10.11834\/jig.200498"},{"key":"1608_CR4","doi-asserted-by":"publisher","unstructured":"Yu W, Liang M, Wang X, Chen Z, Cao X (2022) Student expression recognition and intelligent teaching evaluation in classroom teaching videos based on deep attention network. J Comput Appl 42(03):743\u2013749. https:\/\/doi.org\/10.11772\/j.issn.1001-9081.2021040846","DOI":"10.11772\/j.issn.1001-9081.2021040846"},{"key":"1608_CR5","doi-asserted-by":"publisher","unstructured":"Santra B, Mukherjee DP (2016) Local dominant binary patterns for recognition of multi-view facial expressions. In: 10th Indian Conference on Computer Vision, Graphics and Image Processing (ICVGIP 2016). https:\/\/doi.org\/10.1145\/3009977.3010008","DOI":"10.1145\/3009977.3010008"},{"key":"1608_CR6","doi-asserted-by":"publisher","unstructured":"Viola P, Jones M (2001) Rapid object detection using a boosted cascade of simple features. In: Jacobs A, Baldwin T (eds) IEEE Conference on Computer Vision and Pattern Recognition, pp 511\u2013518. https:\/\/doi.org\/10.1109\/cvpr.2001.990517","DOI":"10.1109\/cvpr.2001.990517"},{"issue":"1","key":"1608_CR7","doi-asserted-by":"publisher","first-page":"172","DOI":"10.1109\/TIP.2006.884954","volume":"16","author":"I Kotsia","year":"2007","unstructured":"Kotsia I, Pitas I (2007) Facial expression recognition in image sequences using geometric deformation features and support vector machines. IEEE Trans Image Process (TIP) 16(1):172\u2013187. 
https:\/\/doi.org\/10.1109\/TIP.2006.884954","journal-title":"IEEE Trans Image Process (TIP)"},{"key":"1608_CR8","doi-asserted-by":"publisher","unstructured":"Sohail ASM, Bhattacharya P (2007) Classification of facial expressions using K-nearest neighbor classifier. In: 3rd International Conference on Computer Vision\/Computer Graphics (MIRAGE 2007), pp 555+. https:\/\/doi.org\/10.1007\/978-3-540-71457-6","DOI":"10.1007\/978-3-540-71457-6"},{"key":"1608_CR9","doi-asserted-by":"publisher","first-page":"64978","DOI":"10.1109\/ACCESS.2019.2917230","volume":"7","author":"L Mao","year":"2019","unstructured":"Mao L, Wang N, Wang L, Chen Y (2019) Classroom micro-expression recognition algorithms based on multi-feature fusion. IEEE Access 7:64978\u201364983. https:\/\/doi.org\/10.1109\/ACCESS.2019.2917230","journal-title":"IEEE Access"},{"key":"1608_CR10","unstructured":"Mnih V, Heess N, Graves A, Kavukcuoglu K (2014) Recurrent Models of Visual Attention. arXiv:1406.6247"},{"key":"1608_CR11","doi-asserted-by":"publisher","unstructured":"Woo S, Park J, Lee JY, Kweon IS (2018) CBAM: Convolutional Block Attention Module. In: COMPUTER VISION - ECCV 2018, PT VII, pp 3\u201319. https:\/\/doi.org\/10.1007\/978-3-030-01234-2","DOI":"10.1007\/978-3-030-01234-2"},{"issue":"7","key":"1608_CR12","doi-asserted-by":"publisher","first-page":"4820","DOI":"10.1109\/TII.2021.3129629","volume":"18","author":"M Wieczorek","year":"2022","unstructured":"Wieczorek M, Sika J, Woniak M, Garg S, Hassan MM (2022) Lightweight convolutional neural network model for human face detection in risk situations. IEEE Trans Ind Inform 18(7):4820\u20134829. 
https:\/\/doi.org\/10.1109\/TII.2021.3129629","journal-title":"IEEE Trans Ind Inform"},{"issue":"21","key":"1608_CR13","doi-asserted-by":"publisher","DOI":"10.1002\/cpe.7059","volume":"34","author":"X Tian","year":"2022","unstructured":"Tian X, Tang S, Zhu H, Xia D (2022) Real-time sentiment analysis of students based on mini-Xception architecture for wisdom classroom. Concurr Comput Pract Exp 34(21):e7059. https:\/\/doi.org\/10.1002\/cpe.7059","journal-title":"Concurr Comput Pract Exp"},{"issue":"22","key":"1608_CR14","doi-asserted-by":"publisher","DOI":"10.1002\/cpe.7727","volume":"35","author":"H Zhu","year":"2023","unstructured":"Zhu H, Hu P, Tang X, Xia D, Huang H (2023) NAGNet: A novel framework for real-time students\u2019 sentiment analysis in the wisdom classroom. Concurrency and Computation: Practice and Experience 35(22):e7727. https:\/\/doi.org\/10.1002\/cpe.7727","journal-title":"Concurrency and Computation: Practice and Experience"},{"key":"1608_CR15","doi-asserted-by":"publisher","unstructured":"Barsoum E, Zhang C, Ferrer CC, Zhang Z (2016) Training deep networks for facial expression recognition with crowd-sourced label distribution. In: Proceedings of the 18th ACM International Conference on Multimodal Interaction(ICMI), pp. 279\u2013283. https:\/\/doi.org\/10.1145\/2993148.2993165","DOI":"10.1145\/2993148.2993165"},{"issue":"1","key":"1608_CR16","doi-asserted-by":"publisher","first-page":"356","DOI":"10.1109\/TIP.2018.2868382","volume":"28","author":"S Li","year":"2019","unstructured":"Li S, Deng W (2019) Reliable crowdsourcing and deep locality-preserving learning for unconstrained facial expression recognition. IEEE Trans Image Process 28(1):356\u2013370. 
https:\/\/doi.org\/10.1109\/TIP.2018.2868382","journal-title":"IEEE Trans Image Process"},{"issue":"1","key":"1608_CR17","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/TAFFC.2017.2740923","volume":"10","author":"A Mollahosseini","year":"2017","unstructured":"Mollahosseini A, Hasani B, Mahoor MH (2017) Affectnet: a database for facial expression, valence, and arousal computing in the wild. IEEE Trans Affect Comput 10(1):18\u201331. https:\/\/doi.org\/10.1109\/TAFFC.2017.2740923","journal-title":"IEEE Trans Affect Comput"},{"issue":"4","key":"1608_CR18","doi-asserted-by":"publisher","first-page":"3553","DOI":"10.1007\/s11760-024-03020-8","volume":"18","author":"Z Zhao","year":"2024","unstructured":"Zhao Z, Li Y, Yang J, Ma Y (2024) A lightweight facial expression recognition model for automated engagement detection. Signal Image Video Process 18(4):3553\u20133563. https:\/\/doi.org\/10.1007\/s11760-024-03020-8","journal-title":"Signal Image Video Process"},{"key":"1608_CR19","doi-asserted-by":"publisher","unstructured":"Woo S, Debnath S, Hu R, Chen X, Liu Z, Kweon IS, Xie S (2023) ConvNeXt V2: co-designing and scaling convnets with masked autoencoders. In: 2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 16133\u201316142. https:\/\/doi.org\/10.1109\/CVPR52729.2023.01548","DOI":"10.1109\/CVPR52729.2023.01548"},{"key":"1608_CR20","doi-asserted-by":"publisher","unstructured":"Liu Z, Mao H, Wu CY, Feichtenhofer C, Darrell T, Xie S (2022) A ConvNet for the 2020s. In: 2022 IEEE\/CVF conference on computer vision and pattern recognition (CVPR), pp 11966\u201311976. https:\/\/doi.org\/10.1109\/CVPR52688.2022.01167","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"1608_CR21","doi-asserted-by":"publisher","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: 2016 IEEE conference on computer vision and pattern recognition (CVPR), pp 770\u2013778. 
https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"1608_CR22","doi-asserted-by":"crossref","unstructured":"Liu Z, Lin Y, Cao Y, Hu H, Wei Y, Zhang Z, Lin S, Guo B (2021) Swin transformer: hierarchical vision transformer using shifted windows. arxiv:2103.14030","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1608_CR23","doi-asserted-by":"publisher","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. In: 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 7132\u20137141. https:\/\/doi.org\/10.1109\/CVPR.2018.00745","DOI":"10.1109\/CVPR.2018.00745"},{"key":"1608_CR24","doi-asserted-by":"publisher","unstructured":"Zheng W, Luo X, Meng Z (2022) Facial expression recognition method based on improved lightweight rank expansion network. Comput Eng 48(09):189\u2013196. https:\/\/doi.org\/10.19678\/j.issn.1000-3428.0062811","DOI":"10.19678\/j.issn.1000-3428.0062811"},{"issue":"5","key":"1608_CR25","doi-asserted-by":"publisher","first-page":"2439","DOI":"10.1109\/TIP.2018.2886767","volume":"28","author":"Y Li","year":"2019","unstructured":"Li Y, Zeng J, Shan S, Chen X (2019) Occlusion aware facial expression recognition using CNN with attention mechanism. IEEE Trans Image Process 28(5):2439\u20132450. https:\/\/doi.org\/10.1109\/TIP.2018.2886767","journal-title":"IEEE Trans Image Process"},{"key":"1608_CR26","doi-asserted-by":"publisher","first-page":"4057","DOI":"10.1109\/TIP.2019.2956143","volume":"29","author":"K Wang","year":"2020","unstructured":"Wang K, Peng X, Yang J, Meng D, Qiao Y (2020) Region attention networks for pose and occlusion robust facial expression recognition. IEEE Trans Image Process 29:4057\u20134069. 
https:\/\/doi.org\/10.1109\/TIP.2019.2956143","journal-title":"IEEE Trans Image Process"},{"key":"1608_CR27","doi-asserted-by":"publisher","first-page":"35","DOI":"10.1016\/j.neucom.2021.02.088","volume":"443","author":"D Xia","year":"2021","unstructured":"Xia D, Liu H, Xu L, Wang L (2021) Visible-infrared person re-identification with data augmentation via cycle-consistent adversarial network. Neurocomputing 443:35\u201346. https:\/\/doi.org\/10.1016\/j.neucom.2021.02.088","journal-title":"Neurocomputing"},{"issue":"3","key":"1608_CR28","doi-asserted-by":"publisher","first-page":"545","DOI":"10.1109\/JSTSP.2022.3233716","volume":"17","author":"H Liu","year":"2023","unstructured":"Liu H, Xia D, Jiang W (2023) Towards homogeneous modality learning and multi-granularity information exploration for visible-infrared person re-identification. IEEE J Select Top Signal Process 17(3):545\u2013559. https:\/\/doi.org\/10.1109\/JSTSP.2022.3233716","journal-title":"IEEE J Select Top Signal Process"},{"key":"1608_CR29","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.112130","volume":"300","author":"Y Hu","year":"2024","unstructured":"Hu Y, Niu A, Sun J, Zhu Y, Yan Q, Dong W, Woniak M, Zhang Y (2024) Dynamic center point learning for multiple object tracking under Severe occlusions. Knowl Based Syst 300:112130. https:\/\/doi.org\/10.1016\/j.knosys.2024.112130","journal-title":"Knowl Based Syst"},{"key":"1608_CR30","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2024.110802","volume":"156","author":"Q Yan","year":"2024","unstructured":"Yan Q, Wang H, Ma Y, Liu Y, Dong W, Woniak M, Zhang Y (2024) Uncertainty estimation in HDR imaging with Bayesian neural networks. Pattern Recogn 156:110802. https:\/\/doi.org\/10.1016\/j.patcog.2024.110802","journal-title":"Pattern Recogn"},{"key":"1608_CR31","doi-asserted-by":"publisher","unstructured":"Salmam FZ, Madani A, Kissi M (2016) Facial expression recognition using decision trees. 
In: 2016 13th International Conference on Computer Graphics, Imaging and Visualization (CGiV), pp 125\u2013130. https:\/\/doi.org\/10.1109\/CGiV.2016.33","DOI":"10.1109\/CGiV.2016.33"},{"issue":"2","key":"1608_CR32","doi-asserted-by":"publisher","first-page":"544","DOI":"10.1109\/TAFFC.2018.2880201","volume":"12","author":"M Li","year":"2021","unstructured":"Li M, Xu H, Huang X, Song Z, Liu X, Li X (2021) Facial expression recognition with identity and emotion joint learning. IEEE Trans Affect Comput 12(2):544\u2013550. https:\/\/doi.org\/10.1109\/TAFFC.2018.2880201","journal-title":"IEEE Trans Affect Comput"},{"key":"1608_CR33","unstructured":"Simonyan K, Zisserman A (2015) Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556"},{"key":"1608_CR34","doi-asserted-by":"publisher","unstructured":"Szegedy C, Vanhoucke V, Ioffe S, Shlens J, Wojna Z (2016) Rethinking the inception architecture for computer vision. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp 2818\u20132826. https:\/\/doi.org\/10.1109\/CVPR.2016.308","DOI":"10.1109\/CVPR.2016.308"},{"key":"1608_CR35","doi-asserted-by":"publisher","first-page":"100","DOI":"10.1109\/TMM.2021.3121547","volume":"25","author":"JY Choi","year":"2023","unstructured":"Choi JY, Lee B (2023) Combining deep convolutional neural networks with stochastic ensemble weight optimization for facial expression recognition in the wild. IEEE Trans Multimed 25:100\u2013111. https:\/\/doi.org\/10.1109\/TMM.2021.3121547","journal-title":"IEEE Trans Multimed"},{"key":"1608_CR36","doi-asserted-by":"publisher","unstructured":"Fan J, Zhou J, Deng X, Wang H, Tao L, Kwan HK (2022) Combating uncertainty and class imbalance in facial expression recognition. In: TENCON 2022-2022 IEEE Region 10 Conference (TENCON), pp 1\u20134. 
https:\/\/doi.org\/10.1109\/TENCON55691.2022.9977693","DOI":"10.1109\/TENCON55691.2022.9977693"},{"key":"1608_CR37","doi-asserted-by":"publisher","unstructured":"Li X, Yue R, Jia W, Wang H, Zheng Y (2021) Recognizing students\u2019 emotions based on facial expression analysis. In: 2021 11th International Conference on Information Technology in Medicine and Education (ITME), pp 96\u2013100. https:\/\/doi.org\/10.1109\/ITME53901.2021.00030","DOI":"10.1109\/ITME53901.2021.00030"},{"key":"1608_CR38","doi-asserted-by":"publisher","unstructured":"Abdullah M, Alkan A (2022) A comparative approach for facial expression recognition in higher education using hybrid-deep learning from students\u2019 facial images. Traitement du Signal 39:1929\u20131941. https:\/\/doi.org\/10.18280\/ts.390605","DOI":"10.18280\/ts.390605"},{"issue":"1","key":"1608_CR39","doi-asserted-by":"publisher","first-page":"597","DOI":"10.1007\/s40747-022-00796-5","volume":"9","author":"H Wu","year":"2023","unstructured":"Wu H, Lu Z, Zhang J (2023) A privacy-preserving student status monitoring system. Complex Intell Syst 9(1):597\u2013608. https:\/\/doi.org\/10.1007\/s40747-022-00796-5","journal-title":"Complex Intell Syst"},{"key":"1608_CR40","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser L, Polosukhin I (2017) Attention is all you need. In: 31st Annual Conference on Neural Information Processing Systems (NIPS)"},{"key":"1608_CR41","unstructured":"Cao K, Wei C, Gaidon A, Arechiga N, Ma T (2019) Learning imbalanced datasets with label-distribution-aware margin loss. In: 33rd Conference on Neural Information Processing Systems (NIPS)"},{"issue":"2","key":"1608_CR42","doi-asserted-by":"publisher","first-page":"318","DOI":"10.1109\/TPAMI.2018.2858826","volume":"42","author":"TY Lin","year":"2020","unstructured":"Lin TY, Goyal P, Girshick R, He K, Dollr P (2020) Focal loss for dense object detection. IEEE Trans Pattern Anal Mach Intell 42(2):318\u2013327. 
https:\/\/doi.org\/10.1109\/TPAMI.2018.2858826","journal-title":"IEEE Trans Pattern Anal Mach Intell"},{"key":"1608_CR43","unstructured":"Sadi AA, Chowdhury L, Jahan NWB, Rafi MNS, Chowdhury R, Khan FA, Mohammed N (2022) LMFLOSS: A hybrid loss for imbalanced medical image classification. (arXiv preprint) arXiv:2212.12741"},{"key":"1608_CR44","doi-asserted-by":"publisher","unstructured":"Huang C (2017) Combining convolutional neural networks for emotion recognition. In: 2017 IEEE MIT Undergraduate Research Technology Conference (URTC), pp 1\u20134. https:\/\/doi.org\/10.1109\/URTC.2017.8284175","DOI":"10.1109\/URTC.2017.8284175"},{"key":"1608_CR45","doi-asserted-by":"publisher","unstructured":"Albanie S, Nagrani A, Vedaldi A, Zisserman A (2018) Emotion recognition in speech using cross-modal transfer in the wild. In: Proceedings of the 26th ACM International Conference on Multimedia, pp 292\u2013301. https:\/\/doi.org\/10.1145\/3240508.3240578","DOI":"10.1145\/3240508.3240578"},{"key":"1608_CR46","doi-asserted-by":"publisher","unstructured":"Wang K, Peng X, Yang J, Lu S, Qiao Y (2020) Suppressing uncertainties for large-scale facial expression recognition. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 6896\u20136905. https:\/\/doi.org\/10.1109\/CVPR42600.2020.00693","DOI":"10.1109\/CVPR42600.2020.00693"},{"issue":"6","key":"1608_CR47","doi-asserted-by":"publisher","first-page":"4435","DOI":"10.1016\/j.aej.2021.09.066","volume":"61","author":"Y Nan","year":"2022","unstructured":"Nan Y, Ju J, Hua Q, Zhang H, Wang B (2022) A-MobileNet: an approach of facial expression recognition. Alexand Eng J 61(6):4435\u20134444. 
https:\/\/doi.org\/10.1016\/j.aej.2021.09.066","journal-title":"Alexand Eng J"},{"key":"1608_CR48","doi-asserted-by":"publisher","first-page":"2016","DOI":"10.1109\/TIP.2021.3049955","volume":"30","author":"H Li","year":"2021","unstructured":"Li H, Wang N, Ding X, Yang X, Gao X (2021) Adaptively learning facial expression representation via C-F labels and distillation. IEEE Trans Image Process 30:2016\u20132028. https:\/\/doi.org\/10.1109\/TIP.2021.3049955","journal-title":"IEEE Trans Image Process"},{"key":"1608_CR49","doi-asserted-by":"publisher","unstructured":"Jin R, Zhao S, Hao Z, Xu Y, Xu T, Chen E (2022) AVT: Au-assisted visual transformer for facial expression recognition. In: 2022 IEEE International Conference on Image Processing (ICIP), pp 2661\u20132665. https:\/\/doi.org\/10.1109\/ICIP46576.2022.9897960","DOI":"10.1109\/ICIP46576.2022.9897960"},{"issue":"2","key":"1608_CR50","doi-asserted-by":"publisher","first-page":"882","DOI":"10.1109\/TCSVT.2023.3237006","volume":"34","author":"C Li","year":"2024","unstructured":"Li C, Li X, Wang X, Huang D, Liu Z, Liao L (2024) FG-AGR: fine-grained associative graph representation for facial expression recognition in the wild. IEEE Trans Circ Syst Video Technol 34(2):882\u2013896. https:\/\/doi.org\/10.1109\/TCSVT.2023.3237006","journal-title":"IEEE Trans Circ Syst Video Technol"},{"key":"1608_CR51","doi-asserted-by":"crossref","unstructured":"Zeng J, Shan S, Chen X (2018) Facial expression recognition with inconsistently annotated datasets. Comput Vis ECCV 2018:227\u2013243","DOI":"10.1007\/978-3-030-01261-8_14"},{"key":"1608_CR52","doi-asserted-by":"publisher","unstructured":"Zhao Z, Liu Q, Zhou F (2021) Robust lightweight facial expression recognition network with label distribution training. In: Proceedings of the AAAI conference on artificial intelligence, pp 3510\u20133519. 
https:\/\/doi.org\/10.1609\/aaai.v35i4.16465","DOI":"10.1609\/aaai.v35i4.16465"},{"issue":"9","key":"1608_CR53","doi-asserted-by":"publisher","first-page":"6253","DOI":"10.1109\/TCSVT.2022.3165321","volume":"32","author":"H Liu","year":"2022","unstructured":"Liu H, Cai H, Lin Q, Li X, Xiao H (2022) Adaptive multilayer perceptual attention network for facial expression recognition. IEEE Trans Circ Syst Video Technol 32(9):6253\u20136266. https:\/\/doi.org\/10.1109\/TCSVT.2022.3165321","journal-title":"IEEE Trans Circ Syst Video Technol"},{"key":"1608_CR54","doi-asserted-by":"publisher","unstructured":"Bonnard J, Dapogny A, Dhombres F, Bailly K (2022) Privileged attribution constrained deep networks for facial expression recognition. In: 2022 26th International Conference on Pattern Recognition (ICPR), pp 1055\u20131061. https:\/\/doi.org\/10.1109\/ICPR56361.2022.9956496","DOI":"10.1109\/ICPR56361.2022.9956496"},{"key":"1608_CR55","doi-asserted-by":"publisher","first-page":"206","DOI":"10.1016\/j.ins.2023.03.105","volume":"634","author":"X Chen","year":"2023","unstructured":"Chen X, Zheng X, Sun K, Liu W, Zhang Y (2023) Self-supervised vision transformer-based few-shot learning for facial expression recognition. Inform Sci 634:206\u2013226. https:\/\/doi.org\/10.1016\/j.ins.2023.03.105","journal-title":"Inform Sci"},{"key":"1608_CR56","doi-asserted-by":"publisher","unstructured":"Chen Y, Wang J, Chen S, Shi Z, Cai J (2019) Facial motion prior networks for facial expression recognition. In: 2019 IEEE Visual Communications and Image Processing (VCIP), pp 1\u20134. https:\/\/doi.org\/10.1109\/VCIP47243.2019.8965826","DOI":"10.1109\/VCIP47243.2019.8965826"},{"key":"1608_CR57","doi-asserted-by":"publisher","unstructured":"Ding H, Zhou P, Chellappa R (2020) Occlusion-adaptive deep network for robust facial expression recognition. In: 2020 IEEE International Joint Conference on Biometrics (IJCB), pp 1\u20139. 
https:\/\/doi.org\/10.1109\/IJCB48548.2020.9304923","DOI":"10.1109\/IJCB48548.2020.9304923"},{"key":"1608_CR58","doi-asserted-by":"publisher","unstructured":"She J, Hu Y, Shi H, Wang J, Shen Q, Mei T (2021) Dive into ambiguity: latent distribution mining and pairwise uncertainty estimation for facial expression recognition. In: 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp 6244-6253. https:\/\/doi.org\/10.1109\/CVPR46437.2021.00618","DOI":"10.1109\/CVPR46437.2021.00618"},{"issue":"3","key":"1608_CR59","doi-asserted-by":"publisher","first-page":"2336","DOI":"10.1109\/TAFFC.2022.3144439","volume":"14","author":"E Arnaud","year":"2022","unstructured":"Arnaud E, Dapogny A, Bailly K (2022) Thin: throwable information networks and application for facial expression recognition in the wild. IEEE Trans Affect Comput 14(3):2336\u20132348. https:\/\/doi.org\/10.1109\/TAFFC.2022.3144439","journal-title":"IEEE Trans Affect Comput"},{"key":"1608_CR60","doi-asserted-by":"publisher","unstructured":"Zeng D, Lin Z, Yan X, Liu Y, Wang F, Tang B (2022) Face2exp: combating data biases for facial expression recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition(CVPR), pp 20291\u201320300. https:\/\/doi.org\/10.1109\/cvpr52688.2022.01965","DOI":"10.1109\/cvpr52688.2022.01965"},{"issue":"06","key":"1608_CR61","doi-asserted-by":"publisher","first-page":"2350032","DOI":"10.1142\/S0129065723500326","volume":"33","author":"Y Yang","year":"2023","unstructured":"Yang Y, Hu L, Zu C, Zhou Q, Wu X, Zhou J, Wang Y (2023) Facial expression recognition with contrastive learning and uncertainty-guided relabeling. Int J Neural Syst 33(06):2350032. 
https:\/\/doi.org\/10.1142\/S0129065723500326","journal-title":"Int J Neural Syst"},{"issue":"5","key":"1608_CR62","doi-asserted-by":"publisher","first-page":"2033","DOI":"10.1109\/TCSVT.2022.3220669","volume":"33","author":"Y Gu","year":"2023","unstructured":"Gu Y, Yan H, Zhang X, Wang Y, Ji Y, Ren F (2023) Toward Facial Expression Recognition in the Wild via Noise-Tolerant Network. IEEE Trans Circ Syst Video Technol 33(5):2033\u20132047. https:\/\/doi.org\/10.1109\/TCSVT.2022.3220669","journal-title":"IEEE Trans Circ Syst Video Technol"}],"container-title":["Complex &amp; Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01608-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s40747-024-01608-8\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-024-01608-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,30]],"date-time":"2025-01-30T20:19:48Z","timestamp":1738268388000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s40747-024-01608-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,13]]},"references-count":62,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2025,1]]}},"alternative-id":["1608"],"URL":"https:\/\/doi.org\/10.1007\/s40747-024-01608-8","relation":{},"ISSN":["2199-4536","2198-6053"],"issn-type":[{"value":"2199-4536","type":"print"},{"value":"2198-6053","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,13]]},"assertion":[{"value":"8 October 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 
September 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"13 November 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"27"}}