{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T06:18:39Z","timestamp":1771481919274,"version":"3.50.1"},"reference-count":76,"publisher":"Springer Science and Business Media LLC","issue":"2","license":[{"start":{"date-parts":[[2025,12,28]],"date-time":"2025-12-28T00:00:00Z","timestamp":1766880000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0"},{"start":{"date-parts":[[2026,1,16]],"date-time":"2026-01-16T00:00:00Z","timestamp":1768521600000},"content-version":"vor","delay-in-days":19,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Artif Intell Rev"],"DOI":"10.1007\/s10462-025-11468-4","type":"journal-article","created":{"date-parts":[[2025,12,28]],"date-time":"2025-12-28T05:15:23Z","timestamp":1766898923000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Emotion-aware adaptation of CLIP model for facial expression recognition"],"prefix":"10.1007","volume":"59","author":[{"given":"Jing","family":"Huan","sequence":"first","affiliation":[]},{"given":"Mingxing","family":"Li","sequence":"additional","affiliation":[]},{"given":"Haoliang","family":"Zhou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,12,28]]},"reference":[{"key":"11468_CR1","unstructured":"Bar A, Gandelsman Y, Darrell T, Globerson A, Efros A (2022) Visual prompting via image inpainting. 
In: Advances in neural information processing systems, pp 25005\u201325017"},{"issue":"4","key":"11468_CR2","doi-asserted-by":"publisher","first-page":"949","DOI":"10.1109\/TAFFC.2019.2907628","volume":"12","author":"M Bishay","year":"2019","unstructured":"Bishay M, Palasek P, Priebe S, Patras I (2019) SchiNet: automatic estimation of symptoms of schizophrenia from facial behaviour analysis. IEEE Trans Affect Comput 12(4):949\u2013961","journal-title":"IEEE Trans Affect Comput"},{"issue":"8","key":"11468_CR3","doi-asserted-by":"publisher","first-page":"5619","DOI":"10.1109\/TII.2022.3141400","volume":"18","author":"C Bisogni","year":"2022","unstructured":"Bisogni C, Castiglione A, Hossain S, Narducci F, Umer S (2022) Impact of deep learning approaches on facial expression recognition in healthcare industries. IEEE Trans Indus Inf 18(8):5619\u20135627","journal-title":"IEEE Trans Indus Inf"},{"issue":"3","key":"11468_CR4","doi-asserted-by":"publisher","first-page":"1927","DOI":"10.1109\/TAFFC.2022.3156920","volume":"14","author":"J Cai","year":"2022","unstructured":"Cai J, Meng Z, Khan AS, Li Z, O\u2019Reilly J, Tong Y (2022) Probabilistic attribute tree structured convolutional neural networks for facial expression recognition in the wild. IEEE Trans Affect Comput 14(3):1927\u20131941","journal-title":"IEEE Trans Affect Comput"},{"issue":"8","key":"11468_CR5","doi-asserted-by":"publisher","first-page":"3848","DOI":"10.1109\/TCSVT.2023.3234312","volume":"33","author":"D Chen","year":"2023","unstructured":"Chen D, Wen G, Li H, Chen R, Li C (2023) Multi-relations aware network for in-the-wild facial expression recognition. IEEE Trans Circuits Syst Video Technol 33(8):3848\u20133859","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"11468_CR6","doi-asserted-by":"crossref","unstructured":"Dalal N, Triggs B (2005) Histograms of oriented gradients for human detection. 
In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 886\u2013893","DOI":"10.1109\/CVPR.2005.177"},{"key":"11468_CR7","unstructured":"Devlin J, Chang M-W, Lee K, Toutanova K (2018) Bert: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805"},{"key":"11468_CR8","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A, Weissenborn D, Zhai X, Unterthiner T, Dehghani M, Minderer M, Heigold G, Gelly S, et al. (2021) An image is worth 16x16 words: transformers for image recognition at scale. In: International conference on learning representations"},{"key":"11468_CR9","doi-asserted-by":"crossref","unstructured":"Foteinopoulou NM, Patras I (2024) EmoCLIP: A vision-language method for zero-shot video facial expression recognition. In: 2024 IEEE 18th international conference on automatic face and gesture recognition (FG), pp 1\u201310. IEEE","DOI":"10.1109\/FG59268.2024.10581982"},{"issue":"2","key":"11468_CR10","doi-asserted-by":"publisher","first-page":"581","DOI":"10.1007\/s11263-023-01891-x","volume":"132","author":"P Gao","year":"2024","unstructured":"Gao P, Geng S, Zhang R, Ma T, Fang R, Zhang Y, Li H, Qiao Y (2024) Clip-adapter: better vision-language models with feature adapters. Int J Comput Vision 132(2):581\u2013595","journal-title":"Int J Comput Vision"},{"key":"11468_CR11","doi-asserted-by":"crossref","unstructured":"Guo Z, Dong B, Ji Z, Bai J, Guo Y, Zuo W (2023) Texts as images in prompt tuning for multi-label image recognition. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 2808\u20132817","DOI":"10.1109\/CVPR52729.2023.00275"},{"key":"11468_CR12","doi-asserted-by":"crossref","unstructured":"He K, Chen X, Xie S, Li Y, Doll\u00e1r P, Girshick R (2022) Masked autoencoders are scalable vision learners. 
In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 16000\u201316009","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"11468_CR13","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"issue":"7","key":"11468_CR14","doi-asserted-by":"publisher","first-page":"3952","DOI":"10.1109\/TII.2018.2884211","volume":"15","author":"C Hong","year":"2018","unstructured":"Hong C, Yu J, Zhang J, Jin X, Lee K-H (2018) Multimodal face-pose estimation with multitask manifold deep learning. IEEE Trans Industr Inf 15(7):3952\u20133961","journal-title":"IEEE Trans Industr Inf"},{"key":"11468_CR15","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2021.103224","volume":"208","author":"C Hong","year":"2021","unstructured":"Hong C, Chen L, Liang Y, Zeng Z (2021) Stacked capsule graph autoencoders for geometry-aware 3D head pose estimation. Comput Vis Image Underst 208:103224","journal-title":"Comput Vis Image Underst"},{"issue":"4","key":"11468_CR16","doi-asserted-by":"publisher","first-page":"2399","DOI":"10.1109\/JIOT.2017.2772959","volume":"5","author":"MS Hossain","year":"2017","unstructured":"Hossain MS, Muhammad G (2017) Emotion-aware connected healthcare big data towards 5G. IEEE Internet Things J 5(4):2399\u20132406","journal-title":"IEEE Internet Things J"},{"key":"11468_CR17","doi-asserted-by":"publisher","first-page":"35","DOI":"10.1016\/j.ins.2021.08.043","volume":"580","author":"Q Huang","year":"2021","unstructured":"Huang Q, Huang C, Wang X, Jiang F (2021) Facial expression recognition with grid-wise attention and visual transformer. 
Inf Sci 580:35\u201354","journal-title":"Inf Sci"},{"key":"11468_CR18","doi-asserted-by":"crossref","unstructured":"Jia M, Tang L, Chen B-C, Cardie C, Belongie S, Hariharan B, Lim S-N (2022) Visual prompt tuning. In: European conference on computer vision, pp 709\u2013727","DOI":"10.1007\/978-3-031-19827-4_41"},{"key":"11468_CR19","unstructured":"Jia C, Yang Y, Xia Y, Chen Y-T, Parekh Z, Pham H, Le Q, Sung Y-H, Li Z, Duerig T (2021) Scaling up visual and vision-language representation learning with noisy text supervision. In: International conference on machine learning, pp 4904\u20134916"},{"key":"11468_CR20","unstructured":"Kuo W, Cui Y, Gu X, Piergiovanni A, Angelova A (2023) Open-vocabulary object detection upon frozen vision and language models. In: International conference on learning representations"},{"key":"11468_CR21","doi-asserted-by":"publisher","first-page":"949","DOI":"10.1007\/s10462-017-9578-y","volume":"52","author":"B Lahasan","year":"2019","unstructured":"Lahasan B, Lutfi SL, San-Segundo R (2019) A survey on techniques to handle face recognition challenges: occlusion, single sample per subject and expression. Artif Intell Rev 52:949\u2013979","journal-title":"Artif Intell Rev"},{"key":"11468_CR22","doi-asserted-by":"publisher","first-page":"2016","DOI":"10.1109\/TIP.2021.3049955","volume":"30","author":"H Li","year":"2021","unstructured":"Li H, Wang N, Ding X, Yang X, Gao X (2021) Adaptively learning facial expression representation via CF labels and distillation. IEEE Trans Image Process 30:2016\u20132028","journal-title":"IEEE Trans Image Process"},{"issue":"2","key":"11468_CR23","doi-asserted-by":"publisher","first-page":"882","DOI":"10.1109\/TCSVT.2023.3237006","volume":"34","author":"C Li","year":"2023","unstructured":"Li C, Li X, Wang X, Huang D, Liu Z, Liao L (2023) FG-AGR: fine-grained associative graph representation for facial expression recognition in the wild. 
IEEE Trans Circuits Syst Video Technol 34(2):882\u2013896","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"key":"11468_CR24","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2024.129142","volume":"619","author":"J Li","year":"2025","unstructured":"Li J, Zhou H, Qian Y, Dong Z, Wang S-J (2025) Micro-expression recognition using dual-view self-supervised contrastive learning with intensity perception. Neurocomputing 619:129142","journal-title":"Neurocomputing"},{"key":"11468_CR25","doi-asserted-by":"crossref","unstructured":"Li S, Deng W, Du J (2017) Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 2852\u20132861","DOI":"10.1109\/CVPR.2017.277"},{"key":"11468_CR26","unstructured":"Li Y, Liang F, Zhao L, Cui Y, Ouyang W, Shao J, Yu F, Yan J (2021) Supervision exists everywhere: a data efficient contrastive language-image pre-training paradigm. arXiv preprint arXiv:2110.05208"},{"key":"11468_CR27","doi-asserted-by":"crossref","unstructured":"Li H, Niu H, Zhu Z, Zhao F (2024) CLIPER: a unified vision-language framework for in-the-wild facial expression recognition. In: 2024 IEEE international conference on multimedia and expo (ICME), pp 1\u20136. IEEE","DOI":"10.1109\/ICME57554.2024.10687508"},{"key":"11468_CR28","unstructured":"Li H, Sui M, Zhao F, Zha Z, Wu F (2021) MVT: mask vision transformer for facial expression recognition in the wild. arXiv preprint arXiv:2106.04520"},{"issue":"9","key":"11468_CR29","doi-asserted-by":"publisher","first-page":"6253","DOI":"10.1109\/TCSVT.2022.3165321","volume":"32","author":"H Liu","year":"2022","unstructured":"Liu H, Cai H, Lin Q, Li X, Xiao H (2022) Adaptive multilayer perceptual attention network for facial expression recognition. 
IEEE Trans Circuits Syst Video Technol 32(9):6253\u20136266","journal-title":"IEEE Trans Circuits Syst Video Technol"},{"issue":"6","key":"11468_CR30","doi-asserted-by":"publisher","first-page":"176","DOI":"10.1007\/s10462-025-11159-0","volume":"58","author":"G Liu","year":"2025","unstructured":"Liu G, Huang S, Wang G, Li M (2025) Emrnet: enhanced micro-expression recognition network with attention and distance correlation. Artif Intell Rev 58(6):176","journal-title":"Artif Intell Rev"},{"key":"11468_CR31","doi-asserted-by":"crossref","unstructured":"Liu H, Zhou Q, Zhang C, Zhu J, Liu T, Zhang Z, Li Y-F (2024) Mmatrans: Muscle movement aware representation learning for facial expression recognition via transformers. IEEE Trans Indus Inform","DOI":"10.1109\/TII.2024.3431640"},{"key":"11468_CR32","doi-asserted-by":"crossref","unstructured":"Li Y, Wang M, Gong M, Lu Y, Liu L (2024) Fer-former: multimodal transformer for facial expression recognition. IEEE Trans Multimed","DOI":"10.1109\/TMM.2024.3521788"},{"key":"11468_CR33","doi-asserted-by":"crossref","unstructured":"Lucey P, Cohn JF, Kanade T, Saragih J, Ambadar Z, Matthews I (2010) The extended cohn-kanade dataset (ck+): A complete dataset for action unit and emotion-specified expression. In: 2010 IEEE Computer society conference on computer vision and pattern recognition-workshops, pp 94\u2013101. IEEE","DOI":"10.1109\/CVPRW.2010.5543262"},{"key":"11468_CR34","doi-asserted-by":"crossref","unstructured":"Lukov T, Zhao N, Lee GH, Lim S-N (2022) Teaching with soft label smoothing for mitigating noisy labels in facial expressions. In: European conference on computer vision, pp 648\u2013665","DOI":"10.1007\/978-3-031-19775-8_38"},{"key":"11468_CR35","doi-asserted-by":"crossref","unstructured":"Lyons M, Akamatsu S, Kamachi M, Gyoba J (1998) Coding facial expressions with gabor wavelets. 
In: Proceedings Third IEEE international conference on automatic face and gesture recognition, pp 200\u2013205 IEEE","DOI":"10.1109\/AFGR.1998.670949"},{"issue":"2","key":"11468_CR36","doi-asserted-by":"publisher","first-page":"1236","DOI":"10.1109\/TAFFC.2021.3122146","volume":"14","author":"F Ma","year":"2021","unstructured":"Ma F, Sun B, Li S (2021) Facial expression recognition with visual transformers and attentional selective fusion. IEEE Trans Affect Comput 14(2):1236\u20131248","journal-title":"IEEE Trans Affect Comput"},{"key":"11468_CR37","doi-asserted-by":"crossref","unstructured":"Mao J, Xu R, Yin X, Chang Y, Nie B, Huang A, Wang Y (2024) Poster++: a simpler and stronger facial expression recognition network. Pattern Recognit, 110951","DOI":"10.1016\/j.patcog.2024.110951"},{"issue":"1","key":"11468_CR38","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/TAFFC.2017.2740923","volume":"10","author":"A Mollahosseini","year":"2017","unstructured":"Mollahosseini A, Hasani B, Mahoor MH (2017) AffectNet: a database for facial expression, valence, and arousal computing in the wild. IEEE Trans Affect Comput 10(1):18\u201331","journal-title":"IEEE Trans Affect Comput"},{"issue":"13","key":"11468_CR39","doi-asserted-by":"publisher","first-page":"3812","DOI":"10.1093\/nar\/gkg509","volume":"31","author":"PC Ng","year":"2003","unstructured":"Ng PC, Henikoff S (2003) SIFT: predicting amino acid changes that affect protein function. Nucl Acids Res 31(13):3812\u20133814","journal-title":"Nucl Acids Res"},{"issue":"8","key":"11468_CR40","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford A, Wu J, Child R, Luan D, Amodei D, Sutskever I et al (2019) Language models are unsupervised multitask learners. OpenAI blog 1(8):9","journal-title":"OpenAI blog"},{"key":"11468_CR41","unstructured":"Radford A, Kim JW, Hallacy C, Ramesh A, Goh G, Agarwal S, Sastry G, Askell A, Mishkin P., Clark J, et al. 
(2021) Learning transferable visual models from natural language supervision. In: International conference on machine learning, pp 8748\u20138763"},{"key":"11468_CR42","doi-asserted-by":"crossref","unstructured":"Rasheed H, Khattak MU, Maaz M, Khan S, Khan FS (2023) Fine-tuned clip models are efficient video learners. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 6545\u20136554","DOI":"10.1109\/CVPR52729.2023.00633"},{"key":"11468_CR43","doi-asserted-by":"crossref","unstructured":"Ruan D, Yan Y, Lai S, Chai Z, Shen C, Wang H (2021) Feature decomposition and reconstruction learning for effective facial expression recognition. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 7660\u20137669","DOI":"10.1109\/CVPR46437.2021.00757"},{"issue":"6","key":"11468_CR44","doi-asserted-by":"publisher","first-page":"803","DOI":"10.1016\/j.imavis.2008.08.005","volume":"27","author":"C Shan","year":"2009","unstructured":"Shan C, Gong S, McOwan PW (2009) Facial expression recognition based on local binary patterns: a comprehensive study. Image Vis Comput 27(6):803\u2013816","journal-title":"Image Vis Comput"},{"key":"11468_CR45","doi-asserted-by":"crossref","unstructured":"She J, Hu Y, Shi H, Wang J, Shen Q, Mei T (2021) Dive into ambiguity: Latent distribution mining and pairwise uncertainty estimation for facial expression recognition. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 6248\u20136257","DOI":"10.1109\/CVPR46437.2021.00618"},{"key":"11468_CR46","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s10462-017-9554-6","volume":"51","author":"Z Sun","year":"2019","unstructured":"Sun Z, Hu Z-P, Wang M, Zhao S-H (2019) Dictionary learning feature space via sparse representation classification for facial expression recognition. 
Artif Intell Rev 51:1\u201318","journal-title":"Artif Intell Rev"},{"issue":"8","key":"11468_CR47","doi-asserted-by":"publisher","first-page":"6547","DOI":"10.1007\/s10462-022-10160-1","volume":"55","author":"Z Sun","year":"2022","unstructured":"Sun Z, Zhang H, Ma S, Hu Z (2022) Combining filtered dictionary representation based deep subspace filter learning with a discriminative classification criterion for facial expression recognition. Artif Intell Rev 55(8):6547\u20136566","journal-title":"Artif Intell Rev"},{"key":"11468_CR48","doi-asserted-by":"publisher","first-page":"337","DOI":"10.1016\/j.neunet.2023.11.033","volume":"170","author":"H Tao","year":"2024","unstructured":"Tao H, Duan Q (2024) Hierarchical attention network with progressive feature fusion for facial expression recognition. Neural Netw 170:337\u2013348","journal-title":"Neural Netw"},{"key":"11468_CR49","doi-asserted-by":"crossref","unstructured":"Tschannen M, Mustafa B, Houlsby N (2023) Clippo: Image-and-language understanding from pixels only. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 11006\u201311017","DOI":"10.1109\/CVPR52729.2023.01059"},{"key":"11468_CR50","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez AN, Kaiser L, Polosukhin I (2017) Attention is all you need. In: Advances in neural information processing systems, pp 5998\u20136008"},{"key":"11468_CR51","doi-asserted-by":"publisher","first-page":"4057","DOI":"10.1109\/TIP.2019.2956143","volume":"29","author":"K Wang","year":"2020","unstructured":"Wang K, Peng X, Yang J, Meng D, Qiao Y (2020) Region attention networks for pose and occlusion robust facial expression recognition. IEEE Trans Image Process 29:4057\u20134069","journal-title":"IEEE Trans Image Process"},{"key":"11468_CR52","doi-asserted-by":"crossref","unstructured":"Wang K, Peng X, Yang J, Lu S, Qiao Y (2020) Suppressing uncertainties for large-scale facial expression recognition. 
In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 6897\u20136906","DOI":"10.1109\/CVPR42600.2020.00693"},{"key":"11468_CR53","doi-asserted-by":"crossref","unstructured":"Wang H, Wang Z, Du M, Yang F, Zhang Z, Ding S, Mardziel P, Hu X (2020) Score-cam: score-weighted visual explanations for convolutional neural networks. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 24\u201325","DOI":"10.1109\/CVPRW50498.2020.00020"},{"key":"11468_CR54","doi-asserted-by":"crossref","unstructured":"Wilhelm T (2019) Towards facial expression analysis in a driver assistance system. In: 2019 14th IEEE international conference on automatic face & gesture recognition (FG 2019), pp 1\u20134","DOI":"10.1109\/FG.2019.8756565"},{"issue":"3","key":"11468_CR55","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s00530-024-01322-y","volume":"30","author":"Y Xu","year":"2024","unstructured":"Xu Y, Huang S, Zhou H (2024) Ca-clip: category-aware adaptation of clip model for few-shot class-incremental learning. Multimedia Syst 30(3):1\u201314","journal-title":"Multimedia Syst"},{"key":"11468_CR56","doi-asserted-by":"crossref","unstructured":"Xue F, Wang Q, Guo G (2021) Transfer: Learning relation-aware facial expression representations with transformers. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp 3601\u20133610","DOI":"10.1109\/ICCV48922.2021.00358"},{"key":"11468_CR57","doi-asserted-by":"crossref","unstructured":"Xue F, Wang Q, Tan Z, Ma Z, Guo G (2022) Vision transformer with attentive pooling for robust facial expression recognition. IEEE Trans Affect Comput","DOI":"10.1109\/TAFFC.2022.3226473"},{"key":"11468_CR58","doi-asserted-by":"crossref","unstructured":"Xu M, Zhang Z, Wei F, Lin Y, Cao Y, Hu H, Bai X (2022) A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. 
In: European conference on computer vision, pp 736\u2013753. Springer","DOI":"10.1007\/978-3-031-19818-2_42"},{"key":"11468_CR59","doi-asserted-by":"crossref","unstructured":"Xu M, Zhang Z, Wei F, Lin Y, Cao Y, Hu H, Bai X (2022) A simple baseline for open-vocabulary semantic segmentation with pre-trained vision-language model. In: European conference on computer vision, pp 736\u2013753","DOI":"10.1007\/978-3-031-19818-2_42"},{"key":"11468_CR60","doi-asserted-by":"crossref","unstructured":"Yang L, Zhang R-Y, Wang Y, Xie X (2024) Mma: Multi-modal adapter for vision-language models. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 23826\u201323837","DOI":"10.1109\/CVPR52733.2024.02249"},{"key":"11468_CR61","doi-asserted-by":"crossref","unstructured":"Zareian A, Rosa KD, Hu DH, Chang S-F (2021) Open-vocabulary object detection using captions. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 14393\u201314402","DOI":"10.1109\/CVPR46437.2021.01416"},{"key":"11468_CR62","doi-asserted-by":"crossref","unstructured":"Zeng D, Lin Z, Yan X, Liu Y, Wang F, Tang B (2022) Face2exp: Combating data biases for facial expression recognition. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 20291\u201320300","DOI":"10.1109\/CVPR52688.2022.01965"},{"key":"11468_CR63","unstructured":"Zeng Y, Zhang X, Li H (2022) Multi-grained vision language pre-training: aligning texts with visual concepts. In: International conference on machine learning, pp 25994\u201326009"},{"issue":"1","key":"11468_CR64","doi-asserted-by":"publisher","first-page":"550","DOI":"10.1007\/s11263-017-1055-1","volume":"126","author":"Z Zhang","year":"2018","unstructured":"Zhang Z, Luo P, Loy CC, Tang X (2018) From facial expression recognition to interpersonal relation prediction. 
Int J Comput Vis 126(1):550\u2013569","journal-title":"Int J Comput Vis"},{"key":"11468_CR65","doi-asserted-by":"crossref","unstructured":"Zhang F, Qu S, Shi F, Xu C (2024) Overcoming the pitfalls of vision-language model for image-text retrieval. In: Proceedings of the 32nd ACM international conference on multimedia, pp 2350\u20132359","DOI":"10.1145\/3664647.3680591"},{"key":"11468_CR66","doi-asserted-by":"crossref","unstructured":"Zhang Y, Wang C, Ling X, Deng W (2022) Learn from all: Erasing attention consistency for noisy label facial expression recognition. In: European conference on computer vision, pp 418\u2013434","DOI":"10.1007\/978-3-031-19809-0_24"},{"key":"11468_CR67","doi-asserted-by":"crossref","unstructured":"Zhang R, Zhang W, Fang R, Gao P, Li K, Dai J, Qiao Y, Li H (2022) Tip-adapter: Training-free adaption of clip for few-shot classification. In: European conference on computer vision, pp 493\u2013510. Springer","DOI":"10.1007\/978-3-031-19833-5_29"},{"key":"11468_CR68","doi-asserted-by":"publisher","first-page":"6544","DOI":"10.1109\/TIP.2021.3093397","volume":"30","author":"Z Zhao","year":"2021","unstructured":"Zhao Z, Liu Q, Wang S (2021) Learning deep global multi-scale and local attention features for facial expression recognition in the wild. IEEE Trans Image Process 30:6544\u20136556","journal-title":"IEEE Trans Image Process"},{"key":"11468_CR69","doi-asserted-by":"crossref","unstructured":"Zhao Z, Liu Q, Zhou F (2021) Robust lightweight facial expression recognition network with label distribution training. In: Proceedings of the AAAI conference on artificial intelligence, pp 3510\u20133519","DOI":"10.1609\/aaai.v35i4.16465"},{"key":"11468_CR70","unstructured":"Zhao Z, Patras I (2023) Prompting visual-language models for dynamic facial expression recognition. 
In: British machine vision conference (BMVC), pp 1\u201314"},{"issue":"9","key":"11468_CR71","doi-asserted-by":"publisher","first-page":"2337","DOI":"10.1007\/s11263-022-01653-1","volume":"130","author":"K Zhou","year":"2022","unstructured":"Zhou K, Yang J, Loy CC, Liu Z (2022) Learning to prompt for vision-language models. Int J Comput Vision 130(9):2337\u20132348","journal-title":"Int J Comput Vision"},{"issue":"6","key":"11468_CR72","doi-asserted-by":"publisher","first-page":"3863","DOI":"10.1007\/s00530-023-01164-0","volume":"29","author":"H Zhou","year":"2023","unstructured":"Zhou H, Huang S, Xu Y (2023) Inceptr: micro-expression recognition integrating inception-CBAM and vision transformer. Multimedia Syst 29(6):3863\u20133876","journal-title":"Multimedia Syst"},{"issue":"3","key":"11468_CR73","doi-asserted-by":"publisher","first-page":"460","DOI":"10.3390\/e25030460","volume":"25","author":"H Zhou","year":"2023","unstructured":"Zhou H, Huang S, Li J, Wang S-J (2023) Dual-ATME: dual-branch attention network for micro-expression recognition. Entropy 25(3):460","journal-title":"Entropy"},{"key":"11468_CR74","doi-asserted-by":"crossref","unstructured":"Zhou H, Huang S, Xu Y (2025) Ua-fer: uncertainty-aware representation learning for facial expression recognition. Neurocomputing 621","DOI":"10.1016\/j.neucom.2024.129261"},{"key":"11468_CR75","doi-asserted-by":"crossref","unstructured":"Zhou H, Huang S, Zhang F, Xu C (2024) Ceprompt: cross-modal emotion-aware prompting for facial expression recognition. IEEE Trans Circuits Syst Video Technol","DOI":"10.1109\/TCSVT.2024.3424777"},{"key":"11468_CR76","doi-asserted-by":"crossref","unstructured":"Zhou K, Yang J, Loy CC, Liu Z (2022) Conditional prompt learning for vision-language models. 
In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 16816\u201316825","DOI":"10.1109\/CVPR52688.2022.01631"}],"container-title":["Artificial Intelligence Review"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10462-025-11468-4","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10462-025-11468-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10462-025-11468-4.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,19]],"date-time":"2026-02-19T05:45:46Z","timestamp":1771479946000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10462-025-11468-4"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,28]]},"references-count":76,"journal-issue":{"issue":"2","published-online":{"date-parts":[[2026,2]]}},"alternative-id":["11468"],"URL":"https:\/\/doi.org\/10.1007\/s10462-025-11468-4","relation":{},"ISSN":["1573-7462"],"issn-type":[{"value":"1573-7462","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,12,28]]},"assertion":[{"value":"19 May 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 December 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 December 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no Conflict of 
interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"66"}}