{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,7]],"date-time":"2026-03-07T16:49:34Z","timestamp":1772902174683,"version":"3.50.1"},"reference-count":34,"publisher":"Springer Science and Business Media LLC","issue":"9","license":[{"start":{"date-parts":[[2025,4,9]],"date-time":"2025-04-09T00:00:00Z","timestamp":1744156800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,4,9]],"date-time":"2025-04-09T00:00:00Z","timestamp":1744156800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Humanities and Social Science Fund of the Ministry of Education of the People\u2019s Republic of China","award":["22YJAZH036"],"award-info":[{"award-number":["22YJAZH036"]}]},{"name":"Humanities and Social Science Fund of the Ministry of Education of the People\u2019s Republic of China","award":["22YJAZH036"],"award-info":[{"award-number":["22YJAZH036"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int. J. Mach. Learn. &amp; Cyber."],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1007\/s13042-025-02614-z","type":"journal-article","created":{"date-parts":[[2025,4,9]],"date-time":"2025-04-09T13:33:01Z","timestamp":1744205581000},"page":"6081-6093","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Contrastive learning distillation of non-target categories for facial expression recognition"],"prefix":"10.1007","volume":"16","author":[{"given":"Heng-Yu","family":"An","sequence":"first","affiliation":[]},{"given":"Rui-Sheng","family":"Jia","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,4,9]]},"reference":[{"key":"2614_CR1","doi-asserted-by":"crossref","unstructured":"Zhao B, Cui Q, Song R, Qiu Y, Liang J (2022) Decoupled knowledge distillation. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 11953\u201311962","DOI":"10.1109\/CVPR52688.2022.01165"},{"key":"2614_CR2","unstructured":"Hinton G, Vinyals O, Dean JJ (2015) Distilling the knowledge in a neural network. Preprint at arXiv:1503.02531"},{"key":"2614_CR3","unstructured":"Romero A, Ballas N, Kahou SE, Chassang A, Gatta C, Bengio YJ (2014) Fitnets: hints for thin deep nets. Preprint at arXiv:1412.6550"},{"key":"2614_CR4","unstructured":"Devlin J, Chang M-W, Lee K, Toutanova KJ (2019) Bert: pre-training of deep bidirectional transformers for language understanding. In: Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, pp 4171\u20134186"},{"key":"2614_CR5","unstructured":"Achiam J, Adler S, Agarwal S, Ahmad L, Akkaya I, Aleman FL et al (2023) Gpt-4 technical report. Preprint at arXiv:2303.08774"},{"key":"2614_CR6","unstructured":"Chen X, Fan H, Girshick R, He KJ (2020) Improved baselines with momentum contrastive learning. Preprint at arXiv:2003.04297"},{"key":"2614_CR7","doi-asserted-by":"crossref","unstructured":"He K, Fan H, Wu Y, Xie S, Girshick R (2020) Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 9729\u20139738","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"2614_CR8","first-page":"19290","volume":"33","author":"Y Yang","year":"2020","unstructured":"Yang Y, Xu ZJ (2020) Rethinking the value of labels for improving class-imbalanced learning. Adv Neural Inf Process Syst 33:19290\u201319301","journal-title":"Adv Neural Inf Process Syst"},{"key":"2614_CR9","doi-asserted-by":"publisher","DOI":"10.1186\/1687-5281-2012-17","author":"A Poursaberi","year":"2012","unstructured":"Poursaberi A, Noubari HA, Gavrilova M, Yanushkevich SN (2012) Gauss\u2013Laguerre wavelet textural feature fusion with geometrical information for facial expression identification. J Image Video Proc. https:\/\/doi.org\/10.1186\/1687-5281-2012-17","journal-title":"J Image Video Proc"},{"key":"2614_CR10","unstructured":"Chen J, Chen Z, Chi Z, Fu H (2014) Facial expression recognition based on facial components detection and hog features. In: International workshops on electrical and computer engineering subfields, pp 884\u2013888"},{"issue":"7","key":"2614_CR11","doi-asserted-by":"publisher","first-page":"1056","DOI":"10.1016\/j.engappai.2007.11.010","volume":"21","author":"S Bashyal","year":"2008","unstructured":"Bashyal S, Venayagamoorthy GK (2008) Recognition of facial expressions using Gabor wavelets and learning vector quantization. Eng Appl Artif Intell 21(7):1056\u20131064","journal-title":"Eng Appl Artif Intell"},{"issue":"5\u20136","key":"2614_CR12","doi-asserted-by":"publisher","first-page":"555","DOI":"10.1016\/S0893-6080(03)00115-1","volume":"16","author":"M Matsugu","year":"2003","unstructured":"Matsugu M, Mori K, Mitari Y, Kaneda YJ (2003) Subject independent facial expression recognition with robust face detection using a convolutional neural network. Neural Netw 16(5\u20136):555\u2013559","journal-title":"Neural Netw"},{"key":"2614_CR13","doi-asserted-by":"crossref","unstructured":"Mollahosseini A, Chan D, Mahoor MH (2016) Going deeper in facial expression recognition using deep neural networks. In: 2016 IEEE winter conference on applications of computer vision (WACV). IEEE, pp 1\u201310","DOI":"10.1109\/WACV.2016.7477450"},{"key":"2614_CR14","doi-asserted-by":"crossref","unstructured":"Levi G, Hassner T (2015) Emotion recognition in the wild via convolutional neural networks and mapped binary patterns. In: Proceedings of the 2015 ACM on international conference on multimodal interaction, pp 503\u2013510","DOI":"10.1145\/2818346.2830587"},{"key":"2614_CR15","doi-asserted-by":"crossref","unstructured":"Hu P, Cai D, Wang S, Yao A, Chen Y (2017) Learning supervised scoring ensemble for emotion recognition in the wild. In: Proceedings of the 19th ACM international conference on multimodal interaction, pp 553\u2013560","DOI":"10.1145\/3136755.3143009"},{"key":"2614_CR16","doi-asserted-by":"crossref","unstructured":"Cai J, Meng Z, Khan AS, Li Z, O\u2019Reilly J, Tong Y (2018) Island loss for learning discriminative features in facial expression recognition. In: 2018 13th IEEE international conference on automatic face & gesture recognition (FG 2018). IEEE, pp 302\u2013309","DOI":"10.1109\/FG.2018.00051"},{"key":"2614_CR17","doi-asserted-by":"crossref","unstructured":"Li S, Deng W, Du J (2017) Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 2852\u20132861","DOI":"10.1109\/CVPR.2017.277"},{"issue":"1","key":"2614_CR18","doi-asserted-by":"publisher","first-page":"18","DOI":"10.1109\/TAFFC.2017.2740923","volume":"10","author":"A Mollahosseini","year":"2017","unstructured":"Mollahosseini A, Hasani B, Mahoor MHJ (2017) Affectnet: A database for facial expression, valence, and arousal computing in the wild. IEEE Trans Affect Comput 10(1):18\u201331","journal-title":"IEEE Trans Affect Comput"},{"key":"2614_CR19","doi-asserted-by":"publisher","first-page":"23","DOI":"10.1016\/j.cogsys.2020.03.002","volume":"62","author":"GV Reddy","year":"2020","unstructured":"Reddy GV, Savarni CD, Mukherjee SJ (2020) Facial expression recognition in the wild, by fusion of deep learnt and hand-crafted features. Cogn Syst Res 62:23\u201334","journal-title":"Cogn Syst Res"},{"key":"2614_CR20","doi-asserted-by":"crossref","unstructured":"Wang K, Peng X, Yang J, Lu S, Qiao Y (2020) Suppressing uncertainties for large-scale facial expression recognition. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 6897\u20136906","DOI":"10.1109\/CVPR42600.2020.00693"},{"key":"2614_CR21","doi-asserted-by":"publisher","first-page":"2016","DOI":"10.1109\/TIP.2021.3049955","volume":"30","author":"H Li","year":"2021","unstructured":"Li H, Wang N, Ding X, Yang X, Gao XJ (2021) Adaptively learning facial expression representation via cf labels and distillation. IEEE Trans Image Process 30:2016\u20132028","journal-title":"IEEE Trans Image Process"},{"key":"2614_CR22","doi-asserted-by":"publisher","first-page":"4637","DOI":"10.1109\/TIP.2022.3186536","volume":"31","author":"H Li","year":"2022","unstructured":"Li H, Wang N, Yang X, Gao XJ (2022) Crs-cont: a well-trained general encoder for facial expression analysis. IEEE Trans Image Process 31:4637\u20134650","journal-title":"IEEE Trans Image Process"},{"issue":"1","key":"2614_CR23","doi-asserted-by":"publisher","first-page":"173","DOI":"10.1109\/TAFFC.2023.3263886","volume":"15","author":"H Li","year":"2023","unstructured":"Li H, Wang N, Yang X, Wang X, Gao XJ (2023) Unconstrained facial expression recognition with no-reference de-elements learning. IEEE Trans Affect Comput 15(1):173\u2013185","journal-title":"IEEE Trans Affect Comput"},{"key":"2614_CR24","unstructured":"Oord Avd, Li Y, Vinyals O J (2018) Representation learning with contrastive predictive coding. Preprint at arXiv:1807.03748"},{"key":"2614_CR25","doi-asserted-by":"crossref","unstructured":"Barsoum E, Zhang C, Ferrer CC, Zhang Z (2016) Training deep networks for facial expression recognition with crowd-sourced label distribution. In: Proceedings of the 18th ACM international conference on multimodal interaction, pp. 279\u2013283","DOI":"10.1145\/2993148.2993165"},{"key":"2614_CR26","doi-asserted-by":"publisher","first-page":"4057","DOI":"10.1109\/TIP.2019.2956143","volume":"29","author":"K Wang","year":"2020","unstructured":"Wang K, Peng X, Yang J, Meng D, Qiao YJ (2020) Region attention networks for pose and occlusion robust facial expression recognition. IEEE Trans Image Process 29:4057\u20134069","journal-title":"IEEE Trans Image Process"},{"issue":"3","key":"2614_CR27","doi-asserted-by":"publisher","first-page":"1143","DOI":"10.1109\/TCDS.2021.3100131","volume":"14","author":"Y Xia","year":"2021","unstructured":"Xia Y, Yu H, Wang X, Jian M, Wang F-YJ (2021) Relation-aware facial expression recognition. IEEE Trans Cogn Dev Syst 14(3):1143\u20131154","journal-title":"IEEE Trans Cogn Dev Syst"},{"key":"2614_CR28","doi-asserted-by":"publisher","first-page":"6544","DOI":"10.1109\/TIP.2021.3093397","volume":"30","author":"Z Zhao","year":"2021","unstructured":"Zhao Z, Liu Q, Wang SJ (2021) Learning deep global multi-scale and local attention features for facial expression recognition in the wild. IEEE Trans Image Process 30:6544\u20136556","journal-title":"IEEE Trans Image Process"},{"key":"2614_CR29","doi-asserted-by":"crossref","unstructured":"Zeng D, Lin Z, Yan X, Liu Y, Wang F, Tang B (2022) Face2exp: Combating data biases for facial expression recognition. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 20291\u201320300","DOI":"10.1109\/CVPR52688.2022.01965"},{"issue":"3","key":"2614_CR30","doi-asserted-by":"publisher","first-page":"2035","DOI":"10.1007\/s00371-023-02900-3","volume":"40","author":"H Xia","year":"2024","unstructured":"Xia H, Lu L, Song SJ (2024) Feature fusion of multi-granularity and multi-scale for facial expression recognition. Vis Comput 40(3):2035\u20132047","journal-title":"Vis Comput"},{"issue":"10","key":"2614_CR31","doi-asserted-by":"publisher","first-page":"4709","DOI":"10.1007\/s00371-022-02619-7","volume":"39","author":"H Li","year":"2023","unstructured":"Li H, Xiao X, Liu X, Guo J, Wen G, Liang PJT (2023) Heuristic objective for facial expression recognition. Vis Comput 39(10):4709\u20134720","journal-title":"Vis Comput"},{"issue":"4","key":"2614_CR32","doi-asserted-by":"publisher","first-page":"3244","DOI":"10.1109\/TAFFC.2022.3226473","volume":"14","author":"F Xue","year":"2022","unstructured":"Xue F, Wang Q, Tan Z, Ma Z, Guo GJ (2022) Vision transformer with attentive pooling for robust facial expression recognition. IEEE Trans Affect Comput 14(4):3244\u20133256","journal-title":"IEEE Trans Affect Comput"},{"key":"2614_CR33","doi-asserted-by":"crossref","unstructured":"Lei J, Liu Z, Li T, Zou Z, Feng Z, Xu J et al (2022) Enhanced dual-level representations for facial expression recognition. In: 2022 IEEE international conference on image processing (ICIP). IEEE, pp 2241\u20132245","DOI":"10.1109\/ICIP46576.2022.9897281"},{"key":"2614_CR34","doi-asserted-by":"crossref","unstructured":"Neo D, Chen T, Winkler S (2023) Large-scale facial expression recognition using dual-domain affect fusion for noisy labels. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp 5692\u20135700","DOI":"10.1109\/CVPRW59228.2023.00603"}],"container-title":["International Journal of Machine Learning and Cybernetics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-025-02614-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s13042-025-02614-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-025-02614-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T11:00:00Z","timestamp":1757156400000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s13042-025-02614-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,9]]},"references-count":34,"journal-issue":{"issue":"9","published-print":{"date-parts":[[2025,9]]}},"alternative-id":["2614"],"URL":"https:\/\/doi.org\/10.1007\/s13042-025-02614-z","relation":{},"ISSN":["1868-8071","1868-808X"],"issn-type":[{"value":"1868-8071","type":"print"},{"value":"1868-808X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,4,9]]},"assertion":[{"value":"25 September 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 March 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 April 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}