{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,23]],"date-time":"2025-11-23T13:31:53Z","timestamp":1763904713113,"version":"3.40.3"},"reference-count":28,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T00:00:00Z","timestamp":1740096000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T00:00:00Z","timestamp":1740096000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["NO. 62266025","NO. 62266025","NO. 62266025","NO. 62266025"],"award-info":[{"award-number":["NO. 62266025","NO. 62266025","NO. 62266025","NO. 62266025"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1007\/s11760-025-03889-z","type":"journal-article","created":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T22:50:17Z","timestamp":1740178217000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Expression recognition method based on feature redundancy optimization"],"prefix":"10.1007","volume":"19","author":[{"given":"Dangguo","family":"Shao","sequence":"first","affiliation":[]},{"given":"Luwei","family":"Zhuang","sequence":"additional","affiliation":[]},{"given":"Lei","family":"Ma","sequence":"additional","affiliation":[]},{"given":"Sanli","family":"Yi","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,21]]},"reference":[{"key":"3889_CR1","unstructured":"Mao, J., et al.: POSTER++: A Simpler and Stronger Facial Expression Recognition Network. arXiv:2301.12149 (2023)"},{"key":"3889_CR2","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"3889_CR3","doi-asserted-by":"crossref","first-page":"610","DOI":"10.1016\/j.patcog.2016.07.026","volume":"61","author":"AT Lopes","year":"2017","unstructured":"Lopes, A.T., de Aguiar, E., De Souza, A.F., Oliveira-Santos, T.: Facial expression recognition with convolutional neural networks: coping with few data and the training sample order. Pattern Recogn. 61, 610\u2013628 (2017)","journal-title":"Pattern Recogn."},{"issue":"9","key":"3889_CR4","doi-asserted-by":"crossref","first-page":"4193","DOI":"10.1109\/TIP.2017.2689999","volume":"26","author":"K Zhang","year":"2017","unstructured":"Zhang, K., Huang, Y., Du, Y., Wang, L.: Facial expression recognition based on deep evolutional spatial-temporal networks. IEEE Trans. Image Process. 26(9), 4193\u20134203 (2017)","journal-title":"IEEE Trans. Image Process."},{"key":"3889_CR5","unstructured":"Vats, A., Chadha, A.: Facial Expression Recognition using Squeeze and Excitation-powered Swin Transformers. 
arXiv:2301.10906 (2023)"},{"issue":"17","key":"3889_CR6","doi-asserted-by":"crossref","first-page":"3595","DOI":"10.3390\/electronics12173595","volume":"12","author":"S Zhang","year":"2023","unstructured":"Zhang, S., et al.: A dual-direction attention mixed feature network for facial expression recognition. Electronics 12(17), 3595 (2023)","journal-title":"Electronics"},{"key":"3889_CR7","doi-asserted-by":"crossref","unstructured":"Li, J., Wen, Y., He, L.: Scconv: spatial and channel reconstruction convolution for feature redundancy. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2023)","DOI":"10.1109\/CVPR52729.2023.00596"},{"key":"3889_CR8","doi-asserted-by":"crossref","unstructured":"Zhang, Q., et al.: Split to be slim: an overlooked redundancy in vanilla convolution. arXiv:2006.12085 (2020)","DOI":"10.24963\/ijcai.2020\/442"},{"key":"3889_CR9","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"key":"3889_CR10","unstructured":"Liu, Y., Shao, Z., Hoffmann, N.: Global Attention Mechanism: Retain Information to Enhance Channel-Spatial Interactions. arXiv:2112.05561 (2021)"},{"key":"3889_CR11","unstructured":"Zhou, D., et al.: Understanding the robustness in vision transformers. In: International Conference on Machine Learning. PMLR (2022)"},{"key":"3889_CR12","doi-asserted-by":"crossref","unstructured":"Yu, F., Koltun, V., Funkhouser, T.: Dilated residual networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (2017)","DOI":"10.1109\/CVPR.2017.75"},{"key":"3889_CR13","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. In: Advances in Neural Information Processing Systems, vol. 25 (2012)"},{"key":"3889_CR14","unstructured":"Howard, A.G., et al.: Mobilenets: Efficient Convolutional Neural Networks for Mobile Vision Applications arXiv:1704.04861 (2017)"},{"key":"3889_CR15","doi-asserted-by":"crossref","unstructured":"Zeiler, M.D., et al.: Deconvolutional networks. In: 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. IEEE (2010).","DOI":"10.1109\/CVPR.2010.5539957"},{"key":"3889_CR16","unstructured":"Duta, I.C., et al.: Pyramidal Convolution: Rethinking Convolutional Neural Networks for Visual Recognition. arXiv:2006.11538 (2020)"},{"key":"3889_CR17","unstructured":"Yang, B., et al.: Condconv: conditionally parameterized convolutions for efficient inference. In: Advances in Neural Information Processing Systems, vol. 32 (2019)"},{"key":"3889_CR18","doi-asserted-by":"crossref","unstructured":"Chen, Y., et al.: Dynamic convolution: attention over convolution kernels. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (2020)","DOI":"10.1109\/CVPR42600.2020.01104"},{"key":"3889_CR19","doi-asserted-by":"crossref","unstructured":"Zheng, C., Matias, M., Chen, C.: Poster: a pyramid cross-fusion transformer network for facial expression recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (2023)","DOI":"10.1109\/ICCVW60793.2023.00339"},{"key":"3889_CR20","doi-asserted-by":"crossref","unstructured":"Li, S., Deng, W., Du, J.: Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. 
In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2584\u20132593. IEEE (2017)","DOI":"10.1109\/CVPR.2017.277"},{"key":"3889_CR21","doi-asserted-by":"crossref","unstructured":"Barsoum, E., Zhang, C., Ferrer, C.C., Zhang, Z.: Training deep networks for facial expression recognition with crowd-sourced label distribution. In: ACM International Conference on Multimodal Interaction (ICMI) (2016)","DOI":"10.1145\/2993148.2993165"},{"key":"3889_CR22","doi-asserted-by":"crossref","unstructured":"Goodfellow, I.J., Erhan, D., Carrier, P.L., Courville, A., Mirza, M., Hamner, B., Cukierski, W., Tang, Y., Thaler, D., Lee, D.-H., et al.: Challenges in representation learning: a report on three machine learning contests. In: International Conference on Neural Information Processing, pp. 117\u2013124. Springer, Berlin (2013)","DOI":"10.1007\/978-3-642-42051-1_16"},{"key":"3889_CR23","doi-asserted-by":"crossref","first-page":"4057","DOI":"10.1109\/TIP.2019.2956143","volume":"29","author":"K Wang","year":"2020","unstructured":"Wang, K., Peng, X., Yang, J., Meng, D., Qiao, Yu.: Region attention networks for pose and occlusion robust facial expression recognition. IEEE Trans. Image Process. 29, 4057\u20134069 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"3889_CR24","doi-asserted-by":"crossref","first-page":"1927","DOI":"10.1109\/TAFFC.2022.3156920","volume":"14","author":"J Cai","year":"2022","unstructured":"Cai, J., Meng, Z., Khan, A.S., Li, Z., O\u2019Reilly, J., Tong, Y.: Probabilistic attribute tree structured convolutional neural networks for facial expression recognition in the wild. IEEE Trans. Affect. Comput.Comput. 14, 1927\u20131941 (2022)","journal-title":"IEEE Trans. Affect. Comput.Comput."},{"key":"3889_CR25","doi-asserted-by":"crossref","first-page":"1868","DOI":"10.1109\/TAFFC.2022.3197761","volume":"13","author":"J Jiang","year":"2022","unstructured":"Jiang, J., Deng, W.: Disentangling identity and pose for facial expression recognition. IEEE Trans. Affect. Comput. 13, 1868\u20131878 (2022)","journal-title":"IEEE Trans. Affect. Comput."},{"issue":"2","key":"3889_CR26","doi-asserted-by":"crossref","first-page":"199","DOI":"10.3390\/biomimetics8020199","volume":"8","author":"Z Wen","year":"2023","unstructured":"Wen, Z., et al.: Distract your attention: multi-head cross attention network for facial expression recognition. Biomimetics 8(2), 199 (2023)","journal-title":"Biomimetics"},{"key":"3889_CR27","doi-asserted-by":"crossref","unstructured":"Zhang, Y., et al.: Learn from all: erasing attention consistency for noisy label facial expression recognition. In: European Conference on Computer Vision. Springer, Cham (2022)","DOI":"10.1007\/978-3-031-19809-0_24"},{"key":"3889_CR28","unstructured":"Li, J., et al.: Emotion Separation and Recognition from a Facial Expression by Generating the Poker Face with Vision Transformers. 
arXiv:2207.11081 (2022)"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-025-03889-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-025-03889-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-025-03889-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,2]],"date-time":"2025-04-02T01:28:30Z","timestamp":1743557310000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-025-03889-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2,21]]},"references-count":28,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2025,4]]}},"alternative-id":["3889"],"URL":"https:\/\/doi.org\/10.1007\/s11760-025-03889-z","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"type":"print","value":"1863-1703"},{"type":"electronic","value":"1863-1711"}],"subject":[],"published":{"date-parts":[[2025,2,21]]},"assertion":[{"value":"23 August 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 September 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"27 January 2025","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 February 2025","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"325"}}