{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,19]],"date-time":"2026-03-19T15:25:59Z","timestamp":1773933959110,"version":"3.50.1"},"reference-count":97,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T00:00:00Z","timestamp":1762819200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T00:00:00Z","timestamp":1762819200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Cluster Comput"],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1007\/s10586-025-05807-x","type":"journal-article","created":{"date-parts":[[2025,11,11]],"date-time":"2025-11-11T20:31:06Z","timestamp":1762893066000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Video-based emotion recognition using motion-aware deep hybrid learning"],"prefix":"10.1007","volume":"29","author":[{"given":"Navneet","family":"Gupta","sequence":"first","affiliation":[]},{"given":"R. 
Vishnu","family":"Priya","sequence":"additional","affiliation":[]},{"given":"Chandan Kumar","family":"Verma","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,11]]},"reference":[{"key":"5807_CR1","unstructured":"Peng, M., et al.: Recognizing micro-expression in video clip with adaptive key-frame mining, arXiv preprint arXiv:2009.09179, (2020)"},{"key":"5807_CR2","doi-asserted-by":"crossref","unstructured":"Aminbeidokhti, M., Pedersoli, M., Cardinal, P., Granger, E.: Emotion recognition with spatial attention and temporal softmax pooling, in Image Analysis and Recognition: 16th International Conference, ICIAR Waterloo, ON, Canada, August 27\u201329, 2019, Proceedings, Part I 16, 2019, pp. 323\u2013331: Springer. (2019)","DOI":"10.1007\/978-3-030-27202-9_29"},{"key":"5807_CR3","doi-asserted-by":"crossref","unstructured":"Meng, D., Peng, X., Wang, K., Qiao, Y.: Frame attention networks for facial expression recognition in videos, in 2019 IEEE international conference on image processing (ICIP), pp. 3866\u20133870: IEEE. (2019)","DOI":"10.1109\/ICIP.2019.8803603"},{"key":"5807_CR4","first-page":"129","volume":"74","author":"YS Gan","year":"2019","unstructured":"Gan, Y.S., Liong, S.-T., Yau, W.-C., Huang, Y.-C., Tan, L.-K.: OFF-ApexNet on micro-expression recognition system. Signal Processing: Image Communication 74, 129\u2013139 (2019)","journal-title":"Signal Processing: Image Communication"},{"key":"5807_CR5","doi-asserted-by":"crossref","unstructured":"Lee, J., Kim, S., Kim, S., Sohn, K.: Spatiotemporal attention based deep neural networks for emotion recognition, in IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2018, pp. 1513\u20131517: IEEE. 
(2018)","DOI":"10.1109\/ICASSP.2018.8461920"},{"key":"5807_CR6","unstructured":"Dosovitskiy, A.: An image is worth 16x16 words: Transformers for image recognition at scale, arXiv preprint arXiv:2010.11929, (2020)"},{"key":"5807_CR7","unstructured":"Farhadi, A., Redmon, J.: Yolov3: An incremental improvement. In Computer Vision and Pattern Recognition, vol. 1804, pp. 1\u20136. Springer Berlin\/Heidelberg, Germany (2018)"},{"key":"5807_CR8","doi-asserted-by":"publisher","first-page":"135","DOI":"10.1016\/j.procs.2017.03.069","volume":"107","author":"J Li","year":"2017","unstructured":"Li, J., et al.: Facial expression recognition with faster R-CNN. Procedia Comput. Sci. 107, 135\u2013140 (2017)","journal-title":"Procedia Comput. Sci."},{"key":"5807_CR9","doi-asserted-by":"crossref","unstructured":"Zeng, Z., Pantic, M., Roisman, G.I., Huang, T.S.: A survey of affect recognition methods: audio, visual and spontaneous expressions, in Proceedings of the 9th international conference on Multimodal interfaces, pp. 126\u2013133. (2007)","DOI":"10.1145\/1322192.1322216"},{"key":"5807_CR10","doi-asserted-by":"crossref","unstructured":"Bartlett, M.S., Littlewort, G., Frank, M., Lainscsek, C., Fasel, I., Movellan, J., Recognizing facial expression: machine learning and application to spontaneous behavior, in: IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR\u201905), 2005, vol. 2, pp. 568\u2013573: IEEE. (2005)","DOI":"10.1109\/CVPR.2005.297"},{"key":"5807_CR11","doi-asserted-by":"publisher","first-page":"549","DOI":"10.1007\/s10586-017-0935-z","volume":"21","author":"M Sajjad","year":"2018","unstructured":"Sajjad, M., Shah, A., Jan, Z., Shah, S.I., Baik, S.W., Mehmood, I.: Facial appearance and texture feature-based robust facial expression recognition framework for sentiment knowledge discovery. Cluster Comput. 
21, 549\u2013567 (2018)","journal-title":"Cluster Comput."},{"key":"5807_CR12","doi-asserted-by":"crossref","unstructured":"Wang, R., Guo, C., Shabaz, M., Rida, I., Cambria, E., Zhu, X.: CIME: Contextual Interaction-based Multimodal Emotion Analysis with Enhanced Semantic Information, IEEE Transactions on Computational Social Systems, (2025)","DOI":"10.22541\/au.173750886.60448227\/v1"},{"key":"5807_CR13","doi-asserted-by":"publisher","DOI":"10.1016\/j.cmpb.2024.108564","volume":"260","author":"X Zhu","year":"2025","unstructured":"Zhu, X., et al.: A client\u2013server based recognition system: Non-contact single\/multiple emotional and behavioral state assessment methods. Comput. Methods Programs Biomed. 260, 108564 (2025)","journal-title":"Comput. Methods Programs Biomed."},{"key":"5807_CR14","doi-asserted-by":"crossref","unstructured":"Zheng, J., et al.: Dynamic Spectral Graph Anomaly Detection, in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 39, no. 12, pp. 13410\u201313418. (2025)","DOI":"10.1609\/aaai.v39i12.33464"},{"key":"5807_CR15","doi-asserted-by":"crossref","unstructured":"Ebrahimi Kahou, S., Michalski, V., Konda, K., Memisevic, R., Pal, C.: Recurrent neural networks for emotion recognition in video, in Proceedings of the ACM on international conference on multimodal interaction, 2015, pp. 467\u2013474. (2015)","DOI":"10.1145\/2818346.2830596"},{"issue":"3","key":"5807_CR16","doi-asserted-by":"publisher","DOI":"10.1007\/s12559-025-10463-9","volume":"17","author":"R Wang","year":"2025","unstructured":"Wang, R., et al.: Contrastive-Based removal of negative information in multimodal emotion analysis. Cogn. Comput 17(3), 107 (2025)","journal-title":"Cogn. Comput"},{"key":"5807_CR17","doi-asserted-by":"publisher","first-page":"14343","DOI":"10.1007\/s11042-020-10203-1","volume":"80","author":"J Wei","year":"2021","unstructured":"Wei, J., Yang, X., Dong, Y.: User-generated video emotion recognition based on key frames. 
Multimedia Tools and Applications 80, 14343\u201314361 (2021)","journal-title":"Multimedia Tools and Applications"},{"key":"5807_CR18","doi-asserted-by":"publisher","DOI":"10.1016\/j.ins.2024.120138","volume":"660","author":"B Pan","year":"2024","unstructured":"Pan, B., Hirota, K., Dai, Y., Jia, Z., Fukushima, E.F., She, J.: Adaptive key-frame selection-based facial expression recognition via multi-cue dynamic features hybrid fusion. Information Sciences 660, 120138 (2024)","journal-title":"Information Sciences"},{"key":"5807_CR19","unstructured":"Guo, H., Yu, W., Que, S., Du, K., Yan, Y., Wang, H.: Video-to-Task Learning via Motion-Guided Attention for Few-Shot Action Recognition, arXiv preprint arXiv:2411.11335, (2024)"},{"issue":"1","key":"5807_CR20","doi-asserted-by":"publisher","first-page":"53","DOI":"10.18178\/joig.11.1.53-60","volume":"11","author":"TKT Zizi","year":"2023","unstructured":"Zizi, T.K.T., Ramli, S., Wook, M., Shukran, M.A.M.: Optical flow-based algorithm analysis to detect human emotion from eye movement-image data. Journal of Image and Graphics 11(1), 53\u201360 (2023)","journal-title":"Journal of Image and Graphics"},{"issue":"4","key":"5807_CR21","doi-asserted-by":"publisher","first-page":"3674","DOI":"10.11591\/ijece.v12i4.pp3674-3683","volume":"12","author":"V Sekar","year":"2022","unstructured":"Sekar, V., Jawaharlalnehru, A.: Semantic-based visual emotion recognition in videos-a transfer learning approach. 
International Journal of Electrical and Computer Engineering (IJECE) 12(4), 3674\u20133683 (2022)","journal-title":"International Journal of Electrical and Computer Engineering (IJECE)"},{"key":"5807_CR22","doi-asserted-by":"crossref","unstructured":"Pikoulis, I., Filntisis, P.P., Maragos, P.: Leveraging semantic scene characteristics and multi-stream convolutional architectures in a contextual approach for video-based visual emotion recognition in the wild, in 16th IEEE International Conference on Automatic Face and Gesture Recognition (FG 2021), 2021, pp. 01\u201308: IEEE. (2021)","DOI":"10.1109\/FG52635.2021.9666957"},{"key":"5807_CR23","doi-asserted-by":"publisher","first-page":"434","DOI":"10.1016\/j.neucom.2022.05.077","volume":"500","author":"B Allaert","year":"2022","unstructured":"Allaert, B., Ward, I.R., Bilasco, I.M., Djeraba, C., Bennamoun, M.: A comparative study on optical flow for facial expression analysis. Neurocomputing 500, 434\u2013448 (2022)","journal-title":"Neurocomputing"},{"key":"5807_CR24","doi-asserted-by":"crossref","unstructured":"Chai, M., Zhang, X., Cheng, D., Tong, W., Wang, K.: Gesture recognition method based on improved YOLOv5 in complex background, in 4th International Conference on Neural Networks, Information and Communication (NNICE), 2024, pp. 730\u2013734: IEEE. (2024)","DOI":"10.1109\/NNICE61279.2024.10498303"},{"key":"5807_CR25","doi-asserted-by":"crossref","unstructured":"Xu, Q., Yu, J., Dong, A.: Improvement of Low-Contrast Objective Detecting Capability for YOLOv5 Based on Receptive Field Enhancement and Redundant Feature Reuse, in International Joint Conference on Neural Networks (IJCNN), 2024, pp. 1\u20139: IEEE. 
(2024)","DOI":"10.1109\/IJCNN60899.2024.10650738"},{"key":"5807_CR26","doi-asserted-by":"crossref","unstructured":"Xiao, Z., Li, Z., Zhang, D.: YOLOv5s-Contextual-Fire: Introducing Contextual Transformer to YOLO for More Accurate Fire Detection, in International Conference on Cognitive based Information Processing and Applications, pp. 563\u2013573: Springer. (2023)","DOI":"10.1007\/978-981-97-1979-2_48"},{"issue":"10","key":"5807_CR27","doi-asserted-by":"publisher","first-page":"12178","DOI":"10.1109\/TII.2024.3414489","volume":"20","author":"C-S Jiang","year":"2024","unstructured":"Jiang, C.-S., Liu, Z.-T., She, J.: Hierarchical co-consistency quantization and information refining binary network for facial expression recognition in human\u2013robot interaction. IEEE Transactions on Industrial Informatics 20(10), 12178\u201312188 (2024)","journal-title":"IEEE Transactions on Industrial Informatics"},{"key":"5807_CR28","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2024.106624","volume":"179","author":"C Cheng","year":"2024","unstructured":"Cheng, C., Liu, W., Feng, L., Jia, Z.: Emotion recognition using hierarchical spatial\u2013temporal learning transformer from regional to global brain. Neural Netw. 179, 106624 (2024)","journal-title":"Neural Netw."},{"key":"5807_CR29","doi-asserted-by":"crossref","unstructured":"Cioroiu, G., Radoi, A.: Emotion Recognition from Contextualized Speech Representations using Fine-tuned Transformers, in 15th International Conference on Communications (COMM), 2024, pp. 1\u20135: IEEE. (2024)","DOI":"10.1109\/COMM62355.2024.10741432"},{"key":"5807_CR30","doi-asserted-by":"crossref","unstructured":"Liu, Y., Geng, D., Wu, X., Liu, Y.: Multimodal Emotion Recognition based on Convolutional Neural Networks and Long Short-Term Memory Networks, in 2024 2nd International Conference on Signal Processing and Intelligent Computing (SPIC), pp. 69\u201373: IEEE. 
(2024)","DOI":"10.1109\/SPIC62469.2024.10691536"},{"key":"5807_CR31","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2025.105519","volume":"158","author":"M Gao","year":"2025","unstructured":"Gao, M., et al.: Towards trustworthy image super-resolution via symmetrical and recursive artificial neural network. Image and Vision Computing 158, 105519 (2025)","journal-title":"Image and Vision Computing"},{"key":"5807_CR32","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2025.105582","author":"S Guo","year":"2025","unstructured":"Guo, S., Li, Q., Gao, M., Zhu, X., Rida, I.: Generalizable deepfake detection via Spatial kernel selection and halo attention network. Image and Vision Computing (2025). https:\/\/doi.org\/10.1016\/j.imavis.2025.105582","journal-title":"Image and Vision Computing"},{"key":"5807_CR33","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2025.103268","author":"X Zhu","year":"2025","unstructured":"Zhu, X., et al.: RMER-DT: Robust multimodal emotion recognition in conversational contexts based on diffusion and Transformers. Information Fusion (2025). https:\/\/doi.org\/10.1016\/j.inffus.2025.103268","journal-title":"Information Fusion"},{"key":"5807_CR34","doi-asserted-by":"crossref","unstructured":"Safavi, F., Patel, K., Vinjamuri, R.: Facial expression recognition with an efficient mix transformer for affective Human-Robot interaction. IEEE Trans. Affect. Comput., (2025)","DOI":"10.1109\/TAFFC.2025.3567966"},{"key":"5807_CR35","doi-asserted-by":"publisher","DOI":"10.1109\/TIM.2025.3578178","author":"C-S Jiang","year":"2025","unstructured":"Jiang, C.-S., Liu, Z.-T., Fukushima, E.F., She, J.: Motion semantic enhancement and autonomous information mining for Static-Dynamic visual emotion recognition in Human-Robot interaction. IEEE Trans. Instrum. Meas. (2025). https:\/\/doi.org\/10.1109\/TIM.2025.3578178","journal-title":"IEEE Trans. Instrum. 
Meas."},{"key":"5807_CR36","doi-asserted-by":"crossref","unstructured":"Latif, S., Jurdak, R., Schuller, B.W.: Evaluating Transformer-Enhanced Deep Reinforcement Learning for Speech Emotion Recognition, Proceedings of Interspeech, Kos Island, Greece, September,, 2024. (2024)","DOI":"10.21437\/Interspeech.2024-1827"},{"key":"5807_CR37","doi-asserted-by":"crossref","unstructured":"Wang, J., Gao, M., Zhai, W., Rida, I., Zhu, X., Li, Q.: Knowledge generation and distillation for road segmentation in intelligent transportation systems. IEEE Trans. Intell. Transp. Syst., (2025)","DOI":"10.1109\/TITS.2025.3577794"},{"key":"5807_CR38","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2025.111993","volume":"169","author":"Y Ye","year":"2026","unstructured":"Ye, Y., Liu, N., Zhao, Y., Zhu, X., Wang, J., Liu, Y.: Advancing federated domain generalization in ophthalmology: Vision enhancement and consistency assurance for multicenter fundus image segmentation. Pattern Recognition 169, 111993 (2026)","journal-title":"Pattern Recognition"},{"key":"5807_CR39","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition, in Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770\u2013778. (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"5807_CR40","doi-asserted-by":"crossref","unstructured":"Weinzaepfel, P., Revaud, J., Harchaoui, Z., Schmid, C.: Deepflow: Large displacement optical flow with deep matching. in Proceedings of the IEEE international conference on computer vision, pp. 1385\u20131392. (2013)","DOI":"10.1109\/ICCV.2013.175"},{"key":"5807_CR41","unstructured":"Jocher, G., et al.: ultralytics\/yolov5: v7. 0-yolov5 sota realtime instance segmentation, Zenodo, (2022)"},{"key":"5807_CR42","unstructured":"Stevens, E., Antiga, L., Viehmann, T.: Deep Learning with PyTorch. 
Manning (2020)"},{"key":"5807_CR43","doi-asserted-by":"publisher","DOI":"10.1016\/j.array.2022.100258","author":"A Mumuni","year":"2022","unstructured":"Mumuni, A., Mumuni, F.: Data augmentation: A comprehensive survey of modern approaches. Array (2022). https:\/\/doi.org\/10.1016\/j.array.2022.100258","journal-title":"Array"},{"key":"5807_CR44","unstructured":"Tan, M., Le, Q.: Efficientnet: Rethinking model scaling for convolutional neural networks, in International conference on machine learning, pp. 6105\u20136114: PMLR. (2019)"},{"key":"5807_CR45","unstructured":"Vaswani, A.: Attention is all you need. Adv. Neural. Inf. Process. Syst., (2017)"},{"key":"5807_CR46","doi-asserted-by":"crossref","unstructured":"Lee, J., Kim, S., Kim, S., Park, J., Sohn, K.: Context-aware emotion recognition networks, in Proceedings of the IEEE\/CVF international conference on computer vision, pp. 10143\u201310152. (2019)","DOI":"10.1109\/ICCV.2019.01024"},{"key":"5807_CR47","doi-asserted-by":"crossref","unstructured":"Lucey, P., Cohn, J.F., Kanade, T., Saragih, J., Ambadar, Z., Matthews, I.: The extended cohn-kanade dataset (ck+): A complete dataset for action unit and emotion-specified expression, in ieee computer society conference on computer vision and pattern recognition-workshops, 2010, pp. 94\u2013101: IEEE. (2010)","DOI":"10.1109\/CVPRW.2010.5543262"},{"key":"5807_CR48","doi-asserted-by":"crossref","unstructured":"Jiang, X., et al.: Dfew: A large-scale database for recognizing dynamic facial expressions in the wild, in Proceedings of the 28th ACM international conference on multimedia, pp. 2881\u20132889. (2020)","DOI":"10.1145\/3394171.3413620"},{"key":"5807_CR49","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: Ferv39k: A large-scale multi-scene dataset for facial expression recognition in videos, in Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 20922\u201320931. 
(2022)","DOI":"10.1109\/CVPR52688.2022.02025"},{"issue":"5","key":"5807_CR50","doi-asserted-by":"publisher","DOI":"10.1371\/journal.pone.0196391","volume":"13","author":"SR Livingstone","year":"2018","unstructured":"Livingstone, S.R., Russo, F.A.: The Ryerson Audio-Visual database of emotional speech and song (RAVDESS): A dynamic, multimodal set of facial and vocal expressions in North American english. PloS One 13(5), e0196391 (2018)","journal-title":"PloS One"},{"key":"5807_CR51","doi-asserted-by":"crossref","unstructured":"Wu, K., et al.: Tinyvit: Fast pretraining distillation for small vision transformers, in European conference on computer vision, pp. 68\u201385: Springer. (2022)","DOI":"10.1007\/978-3-031-19803-8_5"},{"key":"5807_CR52","doi-asserted-by":"crossref","unstructured":"Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., Chen, L.-C.: Mobilenetv2: Inverted residuals and linear bottlenecks, in Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 4510\u20134520. (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"key":"5807_CR53","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: Hierarchical vision transformer using shifted windows, in Proceedings of the IEEE\/CVF international conference on computer vision, pp. 10012\u201310022. (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"5807_CR54","doi-asserted-by":"crossref","unstructured":"Li, X., Peng, X., Ding, C.: Sequential interactive biased network for context-aware emotion recognition, in IEEE International Joint Conference on Biometrics (IJCB), 2021, pp. 1\u20136: IEEE. (2021)","DOI":"10.1109\/IJCB52358.2021.9484370"},{"key":"5807_CR55","doi-asserted-by":"publisher","first-page":"6488","DOI":"10.1109\/ACCESS.2020.3048693","volume":"9","author":"Q Gao","year":"2021","unstructured":"Gao, Q., Zeng, H., Li, G., Tong, T.: Graph reasoning-based emotion recognition network. 
IEEE Access 9, 6488\u20136497 (2021)","journal-title":"IEEE Access"},{"key":"5807_CR56","doi-asserted-by":"publisher","first-page":"6544","DOI":"10.1109\/TIP.2021.3093397","volume":"30","author":"Z Zhao","year":"2021","unstructured":"Zhao, Z., Liu, Q., Wang, S.: Learning deep global multi-scale and local attention features for facial expression recognition in the wild. IEEE Transactions on Image Processing 30, 6544\u20136556 (2021)","journal-title":"IEEE Transactions on Image Processing"},{"key":"5807_CR57","doi-asserted-by":"publisher","DOI":"10.1016\/j.jvcir.2022.103679","volume":"89","author":"Z Wang","year":"2022","unstructured":"Wang, Z., Lao, L., Zhang, X., Li, Y., Zhang, T., Cui, Z.: Context-dependent emotion recognition. Journal of Visual Communication and Image Representation 89, 103679 (2022)","journal-title":"Journal of Visual Communication and Image Representation"},{"key":"5807_CR58","unstructured":"Wu, S., Zhou, L., Hu, Z., Liu, J.: Hierarchical context-based emotion recognition with scene graphs. IEEE Trans. Neural Networks Learn. Syst., (2022)"},{"key":"5807_CR59","doi-asserted-by":"crossref","unstructured":"Yuan, Y., Lu, F., Cheng, X., Liu, Y.: Context Based Vision Emotion Recognition in the Wild, in IEEE 17th Conference on Industrial Electronics and Applications (ICIEA), 2022, pp. 479\u2013484: IEEE. (2022)","DOI":"10.1109\/ICIEA54703.2022.10005917"},{"key":"5807_CR60","doi-asserted-by":"publisher","first-page":"119","DOI":"10.1016\/j.neucom.2022.04.052","volume":"493","author":"Y Guo","year":"2022","unstructured":"Guo, Y., et al.: Facial expressions recognition with multi-region divided attention networks for smart education cloud applications. 
Neurocomputing 493, 119\u2013128 (2022)","journal-title":"Neurocomputing"},{"key":"5807_CR61","doi-asserted-by":"crossref","unstructured":"Wang, L., Jia, G., Jiang, N., Wu, H., Yang, J.: Ease: Robust facial expression recognition via emotion ambiguity-sensitive cooperative networks, in Proceedings of the 30th ACM international conference on multimedia, pp. 218\u2013227. (2022)","DOI":"10.1145\/3503161.3548005"},{"issue":"1","key":"5807_CR62","doi-asserted-by":"publisher","first-page":"650","DOI":"10.1109\/TAFFC.2021.3064918","volume":"14","author":"W Li","year":"2021","unstructured":"Li, W., Dong, X., Wang, Y.: Human emotion recognition with relational region-level analysis. IEEE Trans. Affect. Comput. 14(1), 650\u2013663 (2021)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"5807_CR63","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2023.104824","volume":"139","author":"R Xu","year":"2023","unstructured":"Xu, R., Huang, A., Hu, Y., Feng, X.: GFFT: Global-local feature fusion Transformers for facial expression recognition in the wild. Image and Vision Computing 139, 104824 (2023)","journal-title":"Image and Vision Computing"},{"issue":"2","key":"5807_CR64","doi-asserted-by":"publisher","first-page":"653","DOI":"10.1007\/s00521-023-09040-8","volume":"36","author":"L Qing","year":"2024","unstructured":"Qing, L., Wen, H., Chen, H., Jin, R., Cheng, Y., Peng, Y.: DVC-Net: A new dual-view context-aware network for emotion recognition in the wild. Neural Computing and Applications 36(2), 653\u2013665 (2024)","journal-title":"Neural Computing and Applications"},{"key":"5807_CR65","doi-asserted-by":"crossref","unstructured":"Yang, D., et al.: Context de-confounded emotion recognition, in Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 19005\u201319015. 
(2023)","DOI":"10.1109\/CVPR52729.2023.01822"},{"key":"5807_CR66","doi-asserted-by":"publisher","first-page":"337","DOI":"10.1016\/j.neunet.2023.11.033","volume":"170","author":"H Tao","year":"2024","unstructured":"Tao, H., Duan, Q.: Hierarchical attention network with progressive feature fusion for facial expression recognition. Neural Netw. 170, 337\u2013348 (2024)","journal-title":"Neural Netw."},{"key":"5807_CR67","doi-asserted-by":"crossref","unstructured":"Gupta, N., Priya, R.V., Verma, C.K.: ERFN: Leveraging context for enhanced emotion detection. Int. J. Adv. Comput. Sci. Appl., 15, 6, (2024)","DOI":"10.14569\/IJACSA.2024.0150663"},{"key":"5807_CR68","doi-asserted-by":"publisher","first-page":"145","DOI":"10.1016\/j.neucom.2020.06.062","volume":"413","author":"D Liu","year":"2020","unstructured":"Liu, D., Ouyang, X., Xu, S., Zhou, P., He, K., Wen, S.: SAANet: Siamese action-units attention network for improving dynamic facial expression recognition. Neurocomputing 413, 145\u2013157 (2020)","journal-title":"Neurocomputing"},{"key":"5807_CR69","doi-asserted-by":"crossref","unstructured":"Liu, D., Zhang, H., Zhou, P.: Video-based facial expression recognition using graph convolutional networks, in 25th International Conference on Pattern Recognition (ICPR), 2021, pp. 607\u2013614: IEEE. (2020)","DOI":"10.1109\/ICPR48806.2021.9413094"},{"issue":"3","key":"5807_CR70","doi-asserted-by":"publisher","first-page":"580","DOI":"10.1109\/TETCI.2021.3070713","volume":"6","author":"X Qu","year":"2021","unstructured":"Qu, X., et al.: Attend to where and when: Cascaded attention network for facial expression recognition. IEEE Trans. Emerg. Top. Comput. Intell. 6(3), 580\u2013592 (2021)","journal-title":"IEEE Trans. Emerg. Top. Comput. 
Intell."},{"issue":"2","key":"5807_CR71","doi-asserted-by":"publisher","first-page":"468","DOI":"10.1049\/ipr2.12037","volume":"15","author":"M Sharifnejad","year":"2021","unstructured":"Sharifnejad, M., Shahbahrami, A., Akoushideh, A., Hassanpour, R.Z.: Facial expression recognition using a combination of enhanced local binary pattern and pyramid histogram of oriented gradients features extraction. IET Image Processing 15(2), 468\u2013478 (2021)","journal-title":"IET Image Processing"},{"key":"5807_CR72","doi-asserted-by":"crossref","unstructured":"Niu, B., Gao, Z., Guo, B.: Facial expression recognition with LBP and ORB features, Computational Intelligence and Neuroscience, vol. no. 1, p. 8828245, 2021. (2021)","DOI":"10.1155\/2021\/8828245"},{"issue":"1","key":"5807_CR73","doi-asserted-by":"publisher","DOI":"10.1038\/s41598-022-11173-0","volume":"12","author":"T Debnath","year":"2022","unstructured":"Debnath, T., Reza, M.M., Rahman, A., Beheshti, A., Band, S.S., Alinejad-Rokny, H.: Four-layer ConvNet to facial emotion recognition with minimal epochs and the significance of data diversity. Sci. Rep. 12(1), 6991 (2022)","journal-title":"Sci. Rep."},{"issue":"6","key":"5807_CR74","doi-asserted-by":"publisher","first-page":"3273","DOI":"10.1109\/TCSS.2022.3200060","volume":"10","author":"J Tian","year":"2022","unstructured":"Tian, J., She, Y.: A visual\u2013audio-based emotion recognition system integrating dimensional analysis. IEEE Trans. Comput. Social Syst. 10(6), 3273\u20133282 (2022)","journal-title":"IEEE Trans. Comput. Social Syst."},{"issue":"9","key":"5807_CR75","doi-asserted-by":"publisher","first-page":"6341","DOI":"10.1007\/s00371-023-03168-3","volume":"40","author":"S Indolia","year":"2024","unstructured":"Indolia, S., Nigam, S., Singh, R.: A self-attention-based fusion framework for facial expression recognition in wavelet domain. 
The Visual Computer 40(9), 6341\u20136357 (2024)","journal-title":"The Visual Computer"},{"key":"5807_CR76","doi-asserted-by":"crossref","unstructured":"Dada, E.G., Oyewola, D.O., Joseph, S.B., Emebo, O., Oluwagbemi, O.O.: Facial Emotion Recognition and Classification Using the Convolutional Neural Network-10 (CNN\u201010), Applied Computational Intelligence and Soft Computing, vol. no. 1, p. 2457898, 2023. (2023)","DOI":"10.1155\/2023\/2457898"},{"issue":"2","key":"5807_CR77","doi-asserted-by":"publisher","DOI":"10.3390\/s23020823","volume":"23","author":"C Li","year":"2023","unstructured":"Li, C., Wen, C., Qiu, Y.: A video sequence face expression recognition method based on squeeze-and-excitation and 3dpca network. Sensors 23(2), 823 (2023)","journal-title":"Sensors"},{"key":"5807_CR78","doi-asserted-by":"crossref","unstructured":"Gavade, P.A., Bhat, V.S., Gavade, A.B.: Learning Face Expression Features from Video Using Spatio-Temporal Feature Extractor and CNN-LSTM, in Seventh International Conference on Image Information Processing (ICIIP), 2023, pp. 46\u201350: IEEE. (2023)","DOI":"10.1109\/ICIIP61524.2023.10537794"},{"issue":"7","key":"5807_CR79","doi-asserted-by":"publisher","first-page":"10901","DOI":"10.1007\/s11042-022-13711-4","volume":"82","author":"Y Wu","year":"2023","unstructured":"Wu, Y., Li, J.: Multi-modal emotion identification fusing facial expression and EEG. Multimedia Tools and Applications 82(7), 10901\u201310919 (2023)","journal-title":"Multimedia Tools and Applications"},{"key":"5807_CR80","doi-asserted-by":"crossref","unstructured":"Bi, L., Tang, S., Li, C.: A Facial Expression Recognition Method Based on Improved VGG19 Model, extraction, vol. 15, no. 
7, (2024)","DOI":"10.14569\/IJACSA.2024.0150725"},{"key":"5807_CR81","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.121419","volume":"237","author":"J Wei","year":"2024","unstructured":"Wei, J., Hu, G., Yang, X., Luu, A.T., Dong, Y.: Learning facial expression and body gesture visual information for video emotion recognition. Expert Systems with Applications 237, 121419 (2024)","journal-title":"Expert Systems with Applications"},{"key":"5807_CR82","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2025.130020","volume":"636","author":"Y L\u00fc","year":"2025","unstructured":"L\u00fc, Y., Zhang, F., Ma, Z., Zheng, B., Nan, Z.: Dynamic facial expression recognition in the wild via Multi-Snippet Spatiotemporal Learning. Neurocomputing 636, 130020 (2025)","journal-title":"Neurocomputing"},{"key":"5807_CR83","unstructured":"Kawamura, R., Hayashi, H., Otake, S., Takemura, N., Nagahara, H.: Enhancing Ambiguous Dynamic Facial Expression Recognition with Soft Label-based Data Augmentation, arXiv preprint arXiv:2506.20867, (2025)"},{"key":"5807_CR84","doi-asserted-by":"crossref","unstructured":"Sun, L., Lian, Z., Liu, B., Tao, J.: Mae-dfer: Efficient masked autoencoder for self-supervised dynamic facial expression recognition, in Proceedings of the 31st ACM International Conference on Multimedia, pp. 6110\u20136121. (2023)","DOI":"10.1145\/3581783.3612365"},{"key":"5807_CR85","first-page":"10078","volume":"35","author":"Z Tong","year":"2022","unstructured":"Tong, Z., Song, Y., Wang, J., Wang, L.: Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. Adv. Neural. Inf. Process. Syst. 35, 10078\u201310093 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"5807_CR86","doi-asserted-by":"crossref","unstructured":"Wang, H., et al.: Rethinking the learning paradigm for dynamic facial expression recognition, in Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 
17958\u201317968. (2023)","DOI":"10.1109\/CVPR52729.2023.01722"},{"key":"5807_CR87","doi-asserted-by":"crossref","unstructured":"Li, H., Niu, H., Zhu, Z., Zhao, F.: Intensity-aware loss for dynamic facial expression recognition in the wild, in Proceedings of the AAAI conference on artificial intelligence, vol. 37, no. 1, pp. 67\u201375. (2023)","DOI":"10.1609\/aaai.v37i1.25077"},{"key":"5807_CR88","doi-asserted-by":"crossref","unstructured":"Wang, Y., et al.: Dpcnet: Dual path multi-excitation collaborative network for facial expression representation learning in videos, in Proceedings of the 30th ACM international conference on multimedia, pp. 101\u2013110. (2022)","DOI":"10.1145\/3503161.3547865"},{"key":"5807_CR89","unstructured":"Li, H., Sui, M., Zhu, Z.: Nr-dfernet: Noise-robust network for dynamic facial expression recognition, arXiv preprint arXiv:2206.04975, (2022)"},{"key":"5807_CR90","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Liu, Q.: Former-dfer: Dynamic facial expression recognition transformer, in Proceedings of the 29th ACM international conference on multimedia, pp. 1553\u20131561. (2021)","DOI":"10.1145\/3474085.3475292"},{"key":"5807_CR91","unstructured":"Ma, F., Sun, B., Li, S.: Spatio-temporal transformer for dynamic facial expression recognition in the wild, arXiv preprint arXiv:2205.04749, (2022)"},{"key":"5807_CR92","doi-asserted-by":"crossref","unstructured":"Zeng, H., Li, G., Tong, T., Gao, Q.: A graph convolutional network for emotion recognition in context, in 2020 Cross Strait Radio Science & Wireless Technology Conference (CSRSWTC), pp. 1\u20133: IEEE. (2020)","DOI":"10.1109\/CSRSWTC50769.2020.9372674"},{"key":"5807_CR93","unstructured":"Su, L., Hu, C., Li, G., Cao, D.: Msaf: Multimodal split attention fusion, arXiv preprint arXiv:2012.07175, 2020. 
(2020)"},{"key":"5807_CR94","unstructured":"Fu, Z., et al.: A cross-modal fusion network based on self-attention and residual structure for multimodal emotion recognition, arXiv preprint arXiv:2111.02172, (2021)"},{"key":"5807_CR95","doi-asserted-by":"publisher","first-page":"38","DOI":"10.1016\/j.patrec.2022.07.012","volume":"161","author":"S Verbitskiy","year":"2022","unstructured":"Verbitskiy, S., Berikov, V., Vyshegorodtsev, V.: Eranns: Efficient residual audio neural networks for audio pattern recognition. Pattern Recognition Letters 161, 38\u201344 (2022)","journal-title":"Pattern Recognition Letters"},{"key":"5807_CR96","doi-asserted-by":"crossref","unstructured":"Almulla, M.A.: A multimodal emotion recognition system using deep Convolution neural networks. J. Eng. Res., (2024)","DOI":"10.1109\/AIoT63253.2024.00022"},{"issue":"2","key":"5807_CR97","doi-asserted-by":"publisher","first-page":"207","DOI":"10.1049\/bme2.12012","volume":"10","author":"DGR Kola","year":"2021","unstructured":"Kola, D.G.R., Samayamantula, S.K.: Facial expression recognition using singular values and wavelet-based LGC\u2010HD operator. 
IET Biometrics 10(2), 207\u2013218 (2021)","journal-title":"IET Biometrics"}],"container-title":["Cluster Computing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10586-025-05807-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10586-025-05807-x","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10586-025-05807-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,19]],"date-time":"2026-03-19T13:08:40Z","timestamp":1773925720000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10586-025-05807-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,11]]},"references-count":97,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,2]]}},"alternative-id":["5807"],"URL":"https:\/\/doi.org\/10.1007\/s10586-025-05807-x","relation":{},"ISSN":["1386-7857","1573-7543"],"issn-type":[{"value":"1386-7857","type":"print"},{"value":"1573-7543","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,11]]},"assertion":[{"value":"12 March 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 October 2025","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 October 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 November 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The 
authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval"}}],"article-number":"15"}}