{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,24]],"date-time":"2025-11-24T16:46:58Z","timestamp":1764002818867,"version":"3.45.0"},"reference-count":45,"publisher":"Springer Science and Business Media LLC","issue":"15","license":[{"start":{"date-parts":[[2025,9,28]],"date-time":"2025-09-28T00:00:00Z","timestamp":1759017600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,28]],"date-time":"2025-09-28T00:00:00Z","timestamp":1759017600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["No.62202390, No.62176216","No.62202390, No.62176216","No.62202390, No.62176216","No.62202390, No.62176216","No.62202390, No.62176216","No.62202390, No.62176216"],"award-info":[{"award-number":["No.62202390, No.62176216","No.62202390, No.62176216","No.62202390, No.62176216","No.62202390, No.62176216","No.62202390, No.62176216","No.62202390, No.62176216"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"the Science and Technology Fund of Sichuan Province","award":["No.2022NSFSC0556","No.2022NSFSC0556","No.2022NSFSC0556","No.2022NSFSC0556","No.2022NSFSC0556","No.2022NSFSC0556"],"award-info":[{"award-number":["No.2022NSFSC0556","No.2022NSFSC0556","No.2022NSFSC0556","No.2022NSFSC0556","No.2022NSFSC0556","No.2022NSFSC0556"]}]},{"name":"the Opening Project of the Intelligent Policing Key Laboratory of Sichuan 
Province","award":["No.ZNJW2023KFQN001","No.ZNJW2023KFQN001","No.ZNJW2023KFQN001","No.ZNJW2023KFQN001","No.ZNJW2023KFQN001","No.ZNJW2023KFQN001"],"award-info":[{"award-number":["No.ZNJW2023KFQN001","No.ZNJW2023KFQN001","No.ZNJW2023KFQN001","No.ZNJW2023KFQN001","No.ZNJW2023KFQN001","No.ZNJW2023KFQN001"]}]},{"name":"the State Key Research and Development Project of the Ministry of Science and Technology of Sichuan Province","award":["No.2024YFFK0080","No.2024YFFK0080","No.2024YFFK0080","No.2024YFFK0080","No.2024YFFK0080","No.2024YFFK0080"],"award-info":[{"award-number":["No.2024YFFK0080","No.2024YFFK0080","No.2024YFFK0080","No.2024YFFK0080","No.2024YFFK0080","No.2024YFFK0080"]}]},{"name":"the Xihua University Science and Technology Innovation Competition Project for Postgraduate Students","award":["No.YK20240135, No.YK20240141","No.YK20240135, No.YK20240141","No.YK20240135, No.YK20240141","No.YK20240135, No.YK20240141","No.YK20240135, No.YK20240141","No.YK20240135, No.YK20240141"],"award-info":[{"award-number":["No.YK20240135, No.YK20240141","No.YK20240135, No.YK20240141","No.YK20240135, No.YK20240141","No.YK20240135, No.YK20240141","No.YK20240135, No.YK20240141","No.YK20240135, No.YK20240141"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1007\/s00371-025-04192-1","type":"journal-article","created":{"date-parts":[[2025,9,28]],"date-time":"2025-09-28T04:11:00Z","timestamp":1759032660000},"page":"12905-12916","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Enhanced multi-Scale Dynamic Facial Expression Recognition via Conditional Random 
Fields"],"prefix":"10.1007","volume":"41","author":[{"given":"Meichen","family":"Xia","sequence":"first","affiliation":[]},{"given":"Han","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Hong","family":"Peng","sequence":"additional","affiliation":[]},{"given":"Zhicai","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Jun","family":"Guo","sequence":"additional","affiliation":[]},{"given":"Jiangchao","family":"Long","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,28]]},"reference":[{"key":"4192_CR1","doi-asserted-by":"publisher","first-page":"106621","DOI":"10.1016\/j.cmpb.2022.106621","volume":"215","author":"H Ge","year":"2022","unstructured":"Ge, H., Zhu, Z., Dai, Y., Wang, B., Wu, X.: Facial expression recognition based on deep learning. Comput. Methods Programs Biomed. 215, 106621 (2022)","journal-title":"Comput. Methods Programs Biomed."},{"issue":"3","key":"4192_CR2","doi-asserted-by":"publisher","first-page":"1195","DOI":"10.1109\/TAFFC.2020.2981446","volume":"13","author":"S Li","year":"2020","unstructured":"Li, S., Deng, W.: Deep facial expression recognition: a survey. IEEE Trans. Affect. Comput. 13(3), 1195\u20131215 (2020)","journal-title":"IEEE Trans. Affect. Comput."},{"issue":"3","key":"4192_CR3","doi-asserted-by":"publisher","first-page":"1612","DOI":"10.1109\/TGRS.2018.2867679","volume":"57","author":"FI Alam","year":"2018","unstructured":"Alam, F.I., Zhou, J., Liew, A.W.-C., Jia, X., Chanussot, J., Gao, Y.: Conditional random field and deep feature learning for hyperspectral image classification. IEEE Trans. Geosci. Remote Sens. 57(3), 1612\u20131628 (2018)","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"key":"4192_CR4","doi-asserted-by":"crossref","unstructured":"Lin, G., Shen, C., Van Den\u00a0Hengel, A., Reid, I.: Efficient piecewise training of deep structured models for semantic segmentation. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3194\u20133203 (2016)","DOI":"10.1109\/CVPR.2016.348"},{"key":"4192_CR5","unstructured":"Zhou, L., Liu, Z., He, X.: Face parsing via a fully-convolutional continuous crf neural network. arXiv preprint arXiv:1708.03736 (2017)"},{"key":"4192_CR6","doi-asserted-by":"publisher","first-page":"54","DOI":"10.1016\/j.patrec.2018.08.030","volume":"130","author":"L Zhou","year":"2020","unstructured":"Zhou, L., Kong, X., Gong, C., Zhang, F., Zhang, X.: FC-RCCN: fully convolutional residual continuous CRF network for semantic segmentation. Pattern Recogn. Lett. 130, 54\u201363 (2020)","journal-title":"Pattern Recogn. Lett."},{"issue":"5","key":"4192_CR7","doi-asserted-by":"publisher","first-page":"1229","DOI":"10.3390\/rs15051229","volume":"15","author":"X Cheng","year":"2023","unstructured":"Cheng, X., Lei, H.: Semantic segmentation of remote sensing imagery based on multiscale deformable CNN and DenseCRF. Remote Sens. 15(5), 1229 (2023)","journal-title":"Remote Sens."},{"issue":"5","key":"4192_CR8","doi-asserted-by":"publisher","first-page":"817","DOI":"10.62110\/sciencein.jist.2024.v12.817","volume":"12","author":"S Patil-Kashid","year":"2024","unstructured":"Patil-Kashid, S., Patil, Y., Kashid, A.S.: Facial expression recognition for wild dataset using LBP features and random forest classifier. J. Integrated Sci. Technol. 12(5), 817\u2013817 (2024)","journal-title":"J. Integrated Sci. Technol."},{"issue":"9","key":"4192_CR9","doi-asserted-by":"publisher","first-page":"2659","DOI":"10.1049\/ipr2.12817","volume":"17","author":"X Guo","year":"2023","unstructured":"Guo, X., Lu, S., Wang, S., Lu, Z., Zhang, Y.: DLSANet: facial expression recognition with double-code LBP-layer spatial-attention network. IET Image Proc. 
17(9), 2659\u20132672 (2023)","journal-title":"IET Image Proc."},{"issue":"9","key":"4192_CR10","doi-asserted-by":"publisher","first-page":"4204","DOI":"10.3390\/s23094204","volume":"23","author":"J Liao","year":"2023","unstructured":"Liao, J., Lin, Y., Ma, T., He, S., Liu, X., He, G.: Facial expression recognition methods in the wild based on fusion feature of attention mechanism and LBP. Sensors 23(9), 4204 (2023)","journal-title":"Sensors"},{"key":"4192_CR11","doi-asserted-by":"publisher","first-page":"102862","DOI":"10.1016\/j.jvcir.2020.102862","volume":"71","author":"Z Sun","year":"2020","unstructured":"Sun, Z., Hu, Z.-P., Zhao, M., Li, S.: Multi-scale active patches fusion based on spatiotemporal LBP-top for micro-expression recognition. J. Vis. Commun. Image Represent. 71, 102862 (2020)","journal-title":"J. Vis. Commun. Image Represent."},{"issue":"5","key":"4192_CR12","first-page":"1038","volume":"33","author":"I Firouzian","year":"2020","unstructured":"Firouzian, I., Firouzian, N., Hashemi, S., Kozegar, E.: Pain facial expression recognition from video sequences using spatio-temporal local binary patterns and tracking fiducial points. Int. J. Eng. 33(5), 1038\u20131047 (2020)","journal-title":"Int. J. Eng."},{"key":"4192_CR13","doi-asserted-by":"publisher","first-page":"206","DOI":"10.1016\/j.ins.2023.03.105","volume":"634","author":"X Chen","year":"2023","unstructured":"Chen, X., Zheng, X., Sun, K., Liu, W., Zhang, Y.: Self-supervised vision transformer-based few-shot learning for facial expression recognition. Inf. Sci. 634, 206\u2013226 (2023)","journal-title":"Inf. Sci."},{"key":"4192_CR14","doi-asserted-by":"publisher","first-page":"109157","DOI":"10.1016\/j.patcog.2022.109157","volume":"135","author":"Z Sun","year":"2023","unstructured":"Sun, Z., Zhang, H., Bai, J., Liu, M., Hu, Z.: A discriminatively deep fusion approach with improved conditional gan (im-cgan) for facial expression recognition. Pattern Recogn. 
135, 109157 (2023)","journal-title":"Pattern Recogn."},{"issue":"4","key":"4192_CR15","first-page":"1819","volume":"15","author":"R Singh","year":"2023","unstructured":"Singh, R., Saurav, S., Kumar, T., Saini, R., Vohra, A., Singh, S.: Facial expression recognition in videos using hybrid CNN & ConvLSTM. Int. J. Inf. Technol. 15(4), 1819\u20131830 (2023)","journal-title":"Int. J. Inf. Technol."},{"key":"4192_CR16","doi-asserted-by":"crossref","unstructured":"Lei, L., Chen, T., Li, S., Li, J.: Micro-expression recognition based on facial graph representation learning and facial action unit fusion. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 1571\u20131580 (2021)","DOI":"10.1109\/CVPRW53098.2021.00173"},{"key":"4192_CR17","doi-asserted-by":"publisher","first-page":"4057","DOI":"10.1109\/TIP.2019.2956143","volume":"29","author":"K Wang","year":"2020","unstructured":"Wang, K., Peng, X., Yang, J., Meng, D., Qiao, Y.: Region attention networks for pose and occlusion robust facial expression recognition. IEEE Trans. Image Process. 29, 4057\u20134069 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"4192_CR18","doi-asserted-by":"publisher","first-page":"6544","DOI":"10.1109\/TIP.2021.3093397","volume":"30","author":"Z Zhao","year":"2021","unstructured":"Zhao, Z., Liu, Q., Wang, S.: Learning deep global multi-scale and local attention features for facial expression recognition in the wild. IEEE Trans. Image Process. 30, 6544\u20136556 (2021)","journal-title":"IEEE Trans. Image Process."},{"issue":"4","key":"4192_CR19","doi-asserted-by":"publisher","first-page":"323","DOI":"10.1016\/j.vrih.2023.06.011","volume":"6","author":"X Senhua","year":"2024","unstructured":"Senhua, X., Liqing, G., Liang, W., Wei, F.: Multi-scale context-aware network for continuous sign language recognition. Virtual Real. Intell. Hardw. 6(4), 323\u2013337 (2024)","journal-title":"Virtual Real. Intell. 
Hardw."},{"issue":"3","key":"4192_CR20","doi-asserted-by":"publisher","first-page":"59","DOI":"10.3390\/technologies10030059","volume":"10","author":"I Kansizoglou","year":"2022","unstructured":"Kansizoglou, I., Misirlis, E., Tsintotas, K., Gasteratos, A.: Continuous emotion recognition for long-term behavior modeling through recurrent neural networks. Technologies 10(3), 59 (2022)","journal-title":"Technologies"},{"key":"4192_CR21","doi-asserted-by":"crossref","unstructured":"Sepas-Moghaddam, A., Etemad, A., Pereira, F., Correia, P.L.: Facial emotion recognition using light field images with deep attention-based bidirectional lstm. In: ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3367\u20133371 (2020). IEEE","DOI":"10.1109\/ICASSP40776.2020.9053919"},{"key":"4192_CR22","doi-asserted-by":"publisher","first-page":"111","DOI":"10.1016\/j.aiopen.2022.10.001","volume":"3","author":"T Lin","year":"2022","unstructured":"Lin, T., Wang, Y., Liu, X., Qiu, X.: A survey of transformers. AI Open 3, 111\u2013132 (2022)","journal-title":"AI Open"},{"key":"4192_CR23","unstructured":"Wasi, A.T., \u0160erbetar, K., Islam, R., Rafi, T.H., Chae, D.-K.: Arbex: Attentive feature extraction with reliability balancing for robust facial expression learning. arXiv preprint arXiv:2305.01486 (2023)"},{"key":"4192_CR24","doi-asserted-by":"publisher","first-page":"50","DOI":"10.1109\/TMM.2021.3120873","volume":"25","author":"X Lin","year":"2021","unstructured":"Lin, X., Sun, S., Huang, W., Sheng, B., Li, P., Feng, D.D.: Eapt: efficient attention pyramid transformer for image processing. IEEE Trans. Multimedia 25, 50\u201361 (2021)","journal-title":"IEEE Trans. Multimedia"},{"key":"4192_CR25","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. 
In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"issue":"3","key":"4192_CR26","doi-asserted-by":"publisher","first-page":"0265115","DOI":"10.1371\/journal.pone.0265115","volume":"17","author":"G Yang","year":"2022","unstructured":"Yang, G., Yang, Y., Lu, Z., Yang, J., Liu, D., Zhou, C., Fan, Z.: STA-TSN: spatial-temporal attention temporal segment network for action recognition in video. PLoS ONE 17(3), 0265115 (2022)","journal-title":"PLoS ONE"},{"key":"4192_CR27","unstructured":"Grigsby, J., Wang, Z., Nguyen, N., Qi, Y.: Long-range transformers for dynamic spatiotemporal forecasting. arXiv preprint arXiv:2109.12218 (2021)"},{"key":"4192_CR28","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Liu, Q.: Former-dfer: Dynamic facial expression recognition transformer. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 1553\u20131561 (2021)","DOI":"10.1145\/3474085.3475292"},{"key":"4192_CR29","doi-asserted-by":"crossref","unstructured":"Ma, F., Sun, B., Li, S.: Logo-former: Local-global spatio-temporal transformer for dynamic facial expression recognition. In: ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1\u20135 (2023). IEEE","DOI":"10.1109\/ICASSP49357.2023.10095448"},{"key":"4192_CR30","doi-asserted-by":"crossref","unstructured":"Jiang, X., Zong, Y., Zheng, W., Tang, C., Xia, W., Lu, C., Liu, J.: Dfew: A large-scale database for recognizing dynamic facial expressions in the wild. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 2881\u20132889 (2020)","DOI":"10.1145\/3394171.3413620"},{"key":"4192_CR31","doi-asserted-by":"crossref","unstructured":"Wang, Y., Sun, Y., Huang, Y., Liu, Z., Gao, S., Zhang, W., Ge, W., Zhang, W.: Ferv39k: A large-scale multi-scene dataset for facial expression recognition in videos. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 20922\u201320931 (2022)","DOI":"10.1109\/CVPR52688.2022.02025"},{"issue":"3","key":"4192_CR32","doi-asserted-by":"publisher","first-page":"682","DOI":"10.3390\/math11030682","volume":"11","author":"Y Tian","year":"2023","unstructured":"Tian, Y., Zhang, Y., Zhang, H.: Recent advances in stochastic gradient descent in deep learning. Mathematics 11(3), 682 (2023)","journal-title":"Mathematics"},{"key":"4192_CR33","unstructured":"Ma, F., Sun, B., Li, S.: Spatio-temporal transformer for dynamic facial expression recognition in the wild. arXiv preprint arXiv:2205.04749 (2022)"},{"key":"4192_CR34","doi-asserted-by":"crossref","unstructured":"Tran, D., Wang, H., Torresani, L., Ray, J., LeCun, Y., Paluri, M.: A closer look at spatiotemporal convolutions for action recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6450\u20136459 (2018)","DOI":"10.1109\/CVPR.2018.00675"},{"key":"4192_CR35","doi-asserted-by":"crossref","unstructured":"Hara, K., Kataoka, H., Satoh, Y.: Can spatiotemporal 3d cnns retrace the history of 2d cnns and imagenet? In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6546\u20136555 (2018)","DOI":"10.1109\/CVPR.2018.00685"},{"key":"4192_CR36","doi-asserted-by":"crossref","unstructured":"Liu, Y., Feng, C., Yuan, X., Zhou, L., Wang, W., Qin, J., Luo, Z.: Clip-aware expressive feature learning for video-based facial expression recognition. Inf. Sci. 598, 182\u2013195 (2022)","DOI":"10.1016\/j.ins.2022.03.062"},{"key":"4192_CR37","doi-asserted-by":"crossref","unstructured":"Wang, Y., Sun, Y., Song, W., Gao, S., Huang, Y., Chen, Z., Ge, W., Zhang, W.: Dpcnet: Dual path multi-excitation collaborative network for facial expression representation learning in videos. In: Proceedings of the 30th ACM International Conference on Multimedia, pp. 
101\u2013110 (2022)","DOI":"10.1145\/3503161.3547865"},{"key":"4192_CR38","doi-asserted-by":"publisher","first-page":"109368","DOI":"10.1016\/j.patcog.2023.109368","volume":"138","author":"Y Liu","year":"2023","unstructured":"Liu, Y., Wang, W., Feng, C., Zhang, H., Chen, Z., Zhan, Y.: Expression snippet transformer for robust video-based facial expression recognition. Pattern Recogn. 138, 109368 (2023)","journal-title":"Pattern Recogn."},{"key":"4192_CR39","doi-asserted-by":"crossref","unstructured":"Sathisha, G., Subbaraya, C., Ravikumar, G.: Facial expression recognition in video using 3d-cnn deep features discrimination. In: 2024 3rd International Conference for Innovation in Technology (INOCON), pp. 1\u20136 (2024). IEEE","DOI":"10.1109\/INOCON60754.2024.10512101"},{"key":"4192_CR40","unstructured":"Zhao, J., Wei, X., Bo, L.: R1-omni: Explainable omni-multimodal emotion recognition with reinforcement learning. arXiv preprint arXiv:2503.05379 (2025)"},{"key":"4192_CR41","doi-asserted-by":"crossref","unstructured":"Zhang, Y., Liu, F., Song, J., Zeng, Q., He, H.: Mtfnet: Multi-scale transformer framework for robust emotion monitoring in group learning settings. In: 2024 Asia Pacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC), pp. 1\u20138 (2024). IEEE","DOI":"10.1109\/APSIPAASC63619.2025.10848828"},{"key":"4192_CR42","doi-asserted-by":"crossref","unstructured":"Foteinopoulou, N.M., Patras, I.: Emoclip: A vision-language method for zero-shot video facial expression recognition. In: 2024 IEEE 18th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1\u201310 (2024). IEEE","DOI":"10.1109\/FG59268.2024.10581982"},{"key":"4192_CR43","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Cao, Y., Gong, S., Patras, I.: Enhancing zero-shot facial expression recognition by llm knowledge transfer. In: 2025 IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), pp. 815\u2013824 (2025). 
IEEE","DOI":"10.1109\/WACV61041.2025.00089"},{"key":"4192_CR44","doi-asserted-by":"crossref","unstructured":"Chen, Y., Li, J., Shan, S., Wang, M., Hong, R.: From static to dynamic: Adapting landmark-aware image models for facial expression recognition in videos. IEEE Transactions on Affective Computing (2024)","DOI":"10.1109\/TAFFC.2024.3453443"},{"issue":"301","key":"4192_CR45","first-page":"1","volume":"23","author":"TT Cai","year":"2022","unstructured":"Cai, T.T., Ma, R.: Theoretical foundations of t-sne for visualizing high-dimensional clustered data. J. Mach. Learn. Res. 23(301), 1\u201354 (2022)","journal-title":"J. Mach. Learn. Res."}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-04192-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-025-04192-1\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-04192-1.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,20]],"date-time":"2025-11-20T13:15:39Z","timestamp":1763644539000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-025-04192-1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,28]]},"references-count":45,"journal-issue":{"issue":"15","published-print":{"date-parts":[[2025,12]]}},"alternative-id":["4192"],"URL":"https:\/\/doi.org\/10.1007\/s00371-025-04192-1","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"type":"print","value":"0178-2789"},{"type":"electronic","value":"1432-2315"}],"subject":[],"published":{"date-parts":[[2025,9,28]]},"assertion":[{"value":"22 March 
2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 September 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 September 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"All authors disclosed no relevant relationships.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest\/Conflict of interest"}},{"value":"This study utilizes publicly available facial expression recognition datasets, specifically the DFEW (diverse facial expressions in the wild) and FERV39K (facial expression recognition in 39K) datasets. The DFEW dataset was created by Jiang et al. and includes a variety of facial expressions in real-world settings. The FERV39K dataset was developed by Wang et al. and is a large-scale dataset designed for training facial expression recognition models. The use of the experimental data has been authorized by the dataset providers. 
Since both datasets are publicly available and were ethically collected with informed consent, no additional ethical approval is required for their use in this study.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethics approval and consent to participate"}},{"value":"Not applicable.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}},{"value":"The dataset on which the conclusions of this study are based is provided by the authorities and needs to be accessed after institutional approval.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Materials availability"}},{"value":"The code is available at eng520\/MS3, DOI: 10.5281\/zenodo.15068421.","order":6,"name":"Ethics","group":{"name":"EthicsHeading","label":"Code availability"}},{"value":"The authors declare no Conflict of interest.","order":7,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}