{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T23:06:12Z","timestamp":1773270372245,"version":"3.50.1"},"reference-count":29,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T00:00:00Z","timestamp":1773187200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T00:00:00Z","timestamp":1773187200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["Grant No. 12002138"],"award-info":[{"award-number":["Grant No. 12002138"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["Grant No. 12002138"],"award-info":[{"award-number":["Grant No. 12002138"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Postgraduate Research & Practice Innovation Program of Jiangsu Province","award":["Grant No. KYCX24_4084"],"award-info":[{"award-number":["Grant No. KYCX24_4084"]}]},{"name":"Postgraduate Research & Practice Innovation Program of Jiangsu Province","award":["Grant No. KYCX24_4084"],"award-info":[{"award-number":["Grant No. KYCX24_4084"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2026,6]]},"DOI":"10.1007\/s00530-026-02319-5","type":"journal-article","created":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T07:52:19Z","timestamp":1773215539000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["FER-YOLO: a YOLO-based classifier for facial expression recognition"],"prefix":"10.1007","volume":"32","author":[{"given":"Yucheng","family":"Jin","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6924-7891","authenticated-orcid":false,"given":"Jiyang","family":"Qi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,3,11]]},"reference":[{"key":"2319_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TIM.2025.3578178","volume":"74","author":"CS Jiang","year":"2025","unstructured":"Jiang, C.S., Liu, Z.T., Fukushima, E.F., et al.: Motion semantic enhancement and autonomous information mining for static\u2013dynamic visual emotion recognition in human\u2013robot interaction. IEEE Trans. Instrum. Meas. 74, 1\u201315 (2025). https:\/\/doi.org\/10.1109\/TIM.2025.3578178","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"2319_CR2","doi-asserted-by":"publisher","first-page":"4678","DOI":"10.3390\/app9214678","volume":"9","author":"D Canedo","year":"2019","unstructured":"Canedo, D., Neves, A.J.R.: Facial expression recognition using computer vision: a systematic review. Appl. Sci. 9, 4678 (2019). https:\/\/doi.org\/10.3390\/app9214678","journal-title":"Appl. 
Sci."},{"key":"2319_CR3","doi-asserted-by":"publisher","first-page":"803","DOI":"10.1016\/j.imavis.2008.08.005","volume":"27","author":"C Shan","year":"2009","unstructured":"Shan, C., Gong, S., McOwan, P.W.: Facial expression recognition based on local binary patterns: a comprehensive study. Image Vis. Comput. 27, 803\u2013816 (2009). https:\/\/doi.org\/10.1016\/j.imavis.2008.08.005","journal-title":"Image Vis. Comput."},{"key":"2319_CR4","doi-asserted-by":"publisher","first-page":"333","DOI":"10.1007\/s11042-017-5317-2","volume":"43","author":"F Xu","year":"2017","unstructured":"Xu, F., Zhang, J.P.: A survey on facial micro-expression recognition. Acta Autom. Sin. 43, 333\u2013348 (2017). https:\/\/doi.org\/10.1007\/s11042-017-5317-2","journal-title":"Acta Autom. Sin"},{"key":"2319_CR5","doi-asserted-by":"publisher","unstructured":"Kumar, P., Happy, S.L., Routray, A.: A real-time robust facial expression recognition system using HOG features. In: Proceedings of the International Conference on Computing, Analytics and Security Trends (CAST), pp. 289\u2013293. (2016). https:\/\/doi.org\/10.1109\/CAST.2016.7914982","DOI":"10.1109\/CAST.2016.7914982"},{"key":"2319_CR6","doi-asserted-by":"publisher","first-page":"1340","DOI":"10.1016\/j.patcog.2008.10.010","volume":"42","author":"Y Cheon","year":"2009","unstructured":"Cheon, Y., Kim, D.: Natural facial expression recognition using differential-AAM and manifold learning. Pattern Recogn. 42, 1340\u20131350 (2009). https:\/\/doi.org\/10.1016\/j.patcog.2008.10.010","journal-title":"Pattern Recogn."},{"key":"2319_CR7","doi-asserted-by":"publisher","first-page":"9943","DOI":"10.1109\/TII.2022.3233650","volume":"19","author":"CS Jiang","year":"2023","unstructured":"Jiang, C.S., Liu, Z.T., Wu, M., et al.: Efficient facial expression recognition with representation reinforcement network and transfer self-training for human\u2013machine interaction. IEEE Trans. Industr Inf. 19, 9943\u20139952 (2023). https:\/\/doi.org\/10.1109\/TII.2022.3233650","journal-title":"IEEE Trans. Industr Inf."},{"key":"2319_CR8","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2103.14030","author":"Z Liu","year":"2021","unstructured":"Liu, Z., Lin, Y., Cao, Y., et al., et al.: Swin transformer: hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103 14030. (2021). https:\/\/doi.org\/10.48550\/arXiv.2103.14030","journal-title":"arXiv preprint arXiv:2103 14030"},{"key":"2319_CR9","doi-asserted-by":"publisher","unstructured":"Sandler, M., Howard, A., Zhu, M., et al.: Mobilenetv2: inverted residuals and linear bottlenecks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4510\u20134520. (2018). https:\/\/doi.org\/10.1109\/CVPR.2018.00474","DOI":"10.1109\/CVPR.2018.00474"},{"key":"2319_CR10","doi-asserted-by":"publisher","unstructured":"Howard, A., Sandler, M., Chen, B., et al.: Searching for mobilenetv3. In Proceedings of the IEEE\/CVF international conference on computer vision, pp. 1314\u20131324. (2019). https:\/\/doi.org\/10.1109\/ICCV.2019.00140","DOI":"10.1109\/ICCV.2019.00140"},{"key":"2319_CR11","doi-asserted-by":"publisher","unstructured":"Tan, M., Le, Q.V.: Efficientnet: rethinking model scaling for convolutional neural networks. arXiv preprint arXiv: 1905.11946. (2019). 
{"key":"2319_CR12","doi-asserted-by":"publisher","first-page":"6125","DOI":"10.3390\/app14146125","volume":"14","author":"M Bie","year":"2024","unstructured":"Bie, M., Xu, H., Gao, Y., et al.: Swin-FER: swin transformer for facial expression recognition. Appl. Sci. 14, 6125 (2024). https:\/\/doi.org\/10.3390\/app14146125","journal-title":"Appl. Sci."},
{"key":"2319_CR13","doi-asserted-by":"publisher","first-page":"1071","DOI":"10.13442\/j.cnki.cges.008237","volume":"45","author":"CM Yan","year":"2023","unstructured":"Yan, C.M., Zhang, X., Wang, Q.P.: Facial expression recognition based on improved MobileNetV2. Comput. Eng. Sci. 45, 1071\u20131078 (2023). https:\/\/doi.org\/10.13442\/j.cnki.cges.008237","journal-title":"Comput. Eng. Sci."},
{"key":"2319_CR14","doi-asserted-by":"publisher","first-page":"522","DOI":"10.37188\/CJLCD.2023-0153","volume":"39","author":"YH Zuo","year":"2024","unstructured":"Zuo, Y.H., Bai, W.S., He, Q.S.: NCA-MobileNet: a lightweight facial expression recognition method. Chin. J. Liq. Cryst. Displays. 39, 522\u2013531 (2024). https:\/\/doi.org\/10.37188\/CJLCD.2023-0153","journal-title":"Chin. J. Liq. Cryst. Displays"},
{"key":"2319_CR15","unstructured":"Jocher, G., Chaurasia, A., Qiu, J.: YOLOv8. (2023). https:\/\/github.com\/ultralytics\/ultralytics"},
{"key":"2319_CR16","doi-asserted-by":"publisher","unstructured":"Wang, C.Y., Yeh, I.H., Liao, H.Y.M.: YOLOv9: Learning what you want to learn using programmable gradient information. arXiv preprint arXiv:2402.13616. (2024). https:\/\/doi.org\/10.48550\/arXiv.2402.13616","DOI":"10.48550\/arXiv.2402.13616"},
{"key":"2319_CR17","doi-asserted-by":"publisher","unstructured":"Wang, C.Y., Liao, H.Y.M., Wu, Y.H., et al.: CSPNet: A new backbone that can enhance learning capability of CNN. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 390\u2013391. (2020). https:\/\/doi.org\/10.1109\/CVPRW50498.2020.00203","DOI":"10.1109\/CVPRW50498.2020.00203"},
{"key":"2319_CR18","doi-asserted-by":"publisher","unstructured":"Ding, X., Zhang, X., Ma, N., et al.: RepVGG: Making VGG-style convnets great again. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 13733\u201313742. (2021). https:\/\/doi.org\/10.1109\/CVPR46437.2021.01352","DOI":"10.1109\/CVPR46437.2021.01352"},
{"key":"2319_CR19","doi-asserted-by":"publisher","unstructured":"Szegedy, C., Liu, W., Jia, Y., et al.: Going deeper with convolutions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1\u20139. (2015). https:\/\/doi.org\/10.1109\/CVPR.2015.7298594","DOI":"10.1109\/CVPR.2015.7298594"},
{"key":"2319_CR20","doi-asserted-by":"publisher","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., et al.: Rethinking the inception architecture for computer vision. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2818\u20132826. (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.308","DOI":"10.1109\/CVPR.2016.308"},
{"key":"2319_CR21","doi-asserted-by":"publisher","unstructured":"Wang, C.Y., Bochkovskiy, A., Liao, H.Y.M.: YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7464\u20137475. (2023). https:\/\/doi.org\/10.1109\/CVPR52729.2023.00721","DOI":"10.1109\/CVPR52729.2023.00721"},
{"key":"2319_CR22","doi-asserted-by":"publisher","first-page":"1905","DOI":"10.1007\/s10462-022-10213-5","volume":"56","author":"S Cong","year":"2023","unstructured":"Cong, S., Zhou, Y.: A review of convolutional neural network architectures and their optimizations. Artif. Intell. Rev. 56, 1905\u20131969 (2023). https:\/\/doi.org\/10.1007\/s10462-022-10213-5","journal-title":"Artif. Intell. Rev."},
{"key":"2319_CR23","doi-asserted-by":"publisher","first-page":"59","DOI":"10.1016\/j.neunet.2014.09.005","volume":"64","author":"IJ Goodfellow","year":"2015","unstructured":"Goodfellow, I.J., Erhan, D., Carrier, P.L., et al.: Challenges in representation learning: a report on three machine learning contests. Neural Netw. 64, 59\u201363 (2015). https:\/\/doi.org\/10.1016\/j.neunet.2014.09.005","journal-title":"Neural Netw."},
{"key":"2319_CR24","unstructured":"Li, Y., Wang, R., Luo, X., et al.: RAF-DB: A large-scale real-world Affective Faces Database. (2017). https:\/\/paperswithcode.com\/dataset\/raf-db"},
{"key":"2319_CR25","doi-asserted-by":"publisher","unstructured":"Chattopadhyay, A., Sarkar, A., Howlader, P., et al.: Grad-CAM++: Generalized gradient-based visual explanations for deep convolutional networks. In: Proceedings of the IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 839\u2013847. (2018). https:\/\/doi.org\/10.1109\/WACV.2018.00097","DOI":"10.1109\/WACV.2018.00097"},
{"key":"2319_CR26","doi-asserted-by":"publisher","first-page":"321","DOI":"10.3390\/info15060321","volume":"15","author":"B Jiang","year":"2024","unstructured":"Jiang, B., Li, N., Cui, X., et al.: Research on facial expression recognition algorithm based on lightweight transformer. Information 15, 321 (2024). https:\/\/doi.org\/10.3390\/info15060321","journal-title":"Information"},
{"key":"2319_CR27","doi-asserted-by":"publisher","first-page":"203","DOI":"10.19769\/j.zdhy.2024.08.065","volume":"65","author":"X Ding","year":"2024","unstructured":"Ding, X., Tang, H., Shi, S., et al.: Facial expression recognition method based on improved EfficientNet. Autom. Appl. 65, 203\u2013206 (2024). https:\/\/doi.org\/10.19769\/j.zdhy.2024.08.065","journal-title":"Autom. Appl."},
{"key":"2319_CR28","doi-asserted-by":"publisher","unstructured":"Khanam, R., Hussain, M.: YOLOv11: An overview of the key architectural enhancements. arXiv preprint arXiv:2410.17725. (2024). https:\/\/doi.org\/10.48550\/arXiv.2410.17725","DOI":"10.48550\/arXiv.2410.17725"},
{"key":"2319_CR29","doi-asserted-by":"publisher","unstructured":"Tian, Y., Ye, Q., Doermann, D.: YOLOv12: Attention-centric real-time object detectors. arXiv preprint arXiv:2502.12524. (2025). https:\/\/doi.org\/10.48550\/arXiv.2502.12524","DOI":"10.48550\/arXiv.2502.12524"}],
"container-title":["Multimedia Systems"],
"original-title":[],
"language":"en",
"link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-026-02319-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-026-02319-5","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-026-02319-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],
"deposited":{"date-parts":[[2026,3,11]],"date-time":"2026-03-11T07:52:23Z","timestamp":1773215543000},
"score":1,
"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-026-02319-5"}},
"subtitle":[],
"short-title":[],
"issued":{"date-parts":[[2026,3,11]]},
"references-count":29,
"journal-issue":{"issue":"3","published-print":{"date-parts":[[2026,6]]}},
"alternative-id":["2319"],
"URL":"https:\/\/doi.org\/10.1007\/s00530-026-02319-5",
"relation":{},
"ISSN":["0942-4962","1432-1882"],
"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],
"subject":[],
"published":{"date-parts":[[2026,3,11]]},
"assertion":[{"value":"26 August 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"25 February 2026","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 March 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],
"article-number":"229"}}