{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,3]],"date-time":"2026-03-03T01:32:01Z","timestamp":1772501521750,"version":"3.50.1"},"reference-count":60,"publisher":"Springer Science and Business Media LLC","issue":"12","license":[{"start":{"date-parts":[[2024,8,14]],"date-time":"2024-08-14T00:00:00Z","timestamp":1723593600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,8,14]],"date-time":"2024-08-14T00:00:00Z","timestamp":1723593600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Natural Science Foundation of Shandong Province China","award":["NO. ZR2022LZH003"],"award-info":[{"award-number":["NO. ZR2022LZH003"]}]},{"DOI":"10.13039\/100014103","name":"Key Technology Research and Development Program of Shandong Province","doi-asserted-by":"publisher","award":["NO.2021CXGC010506"],"award-info":[{"award-number":["NO.2021CXGC010506"]}],"id":[{"id":"10.13039\/100014103","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["NO.62101311"],"award-info":[{"award-number":["NO.62101311"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1007\/s11760-024-03501-w","type":"journal-article","created":{"date-parts":[[2024,8,14]],"date-time":"2024-08-14T14:02:09Z","timestamp":1723644129000},"page":"8693-8705","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Attentional visual graph neural network based facial expression 
recognition method"],"prefix":"10.1007","volume":"18","author":[{"given":"Wenmin","family":"Dong","sequence":"first","affiliation":[]},{"given":"Xiangwei","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Lifeng","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Yuang","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,8,14]]},"reference":[{"key":"3501_CR1","doi-asserted-by":"publisher","first-page":"817","DOI":"10.1016\/j.aej.2023.01.017","volume":"68","author":"M Sajjad","year":"2023","unstructured":"Sajjad, M., Ullah, F.U.M., Ullah, M., Christodoulou, G., Cheikh, F.A., Hijji, M., Muhammad, K., Rodrigues, J.J.: A comprehensive survey on deep facial expression recognition: challenges, applications, and future guidelines. Alex. Eng. J. 68, 817\u2013840 (2023)","journal-title":"Alex. Eng. J."},{"key":"3501_CR2","first-page":"43","volume":"2","author":"D Kong","year":"2019","unstructured":"Kong, D., Zhu, M., Yu, J.: Research on the application and method of facial expression recognition in assistive medical care. Life Sci. Instr. 2, 43\u201348 (2019)","journal-title":"Life Sci. Instr."},{"key":"3501_CR3","first-page":"122784","volume":"2023","author":"I Saadi","year":"2023","unstructured":"Saadi, I., Abdelmalik, T.-A., Hadid, A., El Hillali, Y., et al.: Driver\u2019s facial expression recognition: a comprehensive survey. Expert Syst. Appl. 2023, 122784 (2023)","journal-title":"Expert Syst. Appl."},{"key":"3501_CR4","unstructured":"Lu, F., Liu, B.: Affective digital twins for digital human: bridging the gap in human\u2013machine affective interaction (2023)"},{"key":"3501_CR5","doi-asserted-by":"crossref","unstructured":"Chen, X., Zheng, X., Sun, K., Liu, W., Zhang, Y.: Self-supervised vision transformer-based few-shot learning for facial expression recognition. Inform. Sci. 
634, 206\u2013226 (2023)","DOI":"10.1016\/j.ins.2023.03.105"},{"key":"3501_CR6","doi-asserted-by":"crossref","unstructured":"Dong, X., Tan, L., Zhou, L., Song, Y.: Scene recognition in short video with multi-resolution cnns. In: 2019 2nd International Conference on Artificial Intelligence and Big Data, pp. 419\u2013422 (2019)","DOI":"10.1109\/ICAIBD.2019.8837029"},{"issue":"6","key":"3501_CR7","doi-asserted-by":"publisher","first-page":"803","DOI":"10.1016\/j.imavis.2008.08.005","volume":"27","author":"C Shan","year":"2009","unstructured":"Shan, C., Gong, S., McOwan, P.W.: Facial expression recognition based on local binary patterns: a comprehensive study. Image Vis. Comput. 27(6), 803\u2013816 (2009)","journal-title":"Image Vis. Comput."},{"issue":"1","key":"3501_CR8","doi-asserted-by":"publisher","first-page":"645","DOI":"10.1186\/s40064-015-1427-3","volume":"4","author":"P Carcagn\u00ec","year":"2015","unstructured":"Carcagn\u00ec, P., Del Coco, M., Leo, M., Distante, C.: Facial expression recognition and histograms of oriented gradients: a comprehensive study. Springerplus 4(1), 645 (2015)","journal-title":"Springerplus"},{"key":"3501_CR9","doi-asserted-by":"crossref","unstructured":"Rathi, P., Sharma, R., Singal, P., Lamba, P.S., Chaudhary, G., Al-Turjman, F.: Micro-expression recognition using 3d-cnn layering. In: AI-powered IoT for COVID-19, pp. 123\u2013140 (2020)","DOI":"10.1201\/9781003098881-6"},{"key":"3501_CR10","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"3501_CR11","first-page":"8291","volume":"35","author":"K Han","year":"2022","unstructured":"Han, K., Wang, Y., Guo, J., Tang, Y., Wu, E.: Vision gnn: an image is worth graph of nodes. Adv. Neural. Inf. Process. Syst. 
35, 8291\u20138303 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"issue":"16","key":"3501_CR12","doi-asserted-by":"publisher","first-page":"11243","DOI":"10.1007\/s00500-023-08531-z","volume":"27","author":"Y Xie","year":"2023","unstructured":"Xie, Y., Tian, W., Zhang, H., Ma, T.: Facial expression recognition through multi-level features extraction and fusion. Soft. Comput. 27(16), 11243\u201311258 (2023)","journal-title":"Soft. Comput."},{"key":"3501_CR13","doi-asserted-by":"publisher","first-page":"817","DOI":"10.1016\/j.aej.2023.01.017","volume":"68","author":"M Sajjad","year":"2023","unstructured":"Sajjad, M., Ullah, F.U.M., Ullah, M., Christodoulou, G., Cheikh, F.A., Hijji, M., Muhammad, K., Rodrigues, J.J.: A comprehensive survey on deep facial expression recognition: challenges, applications, and future guidelines. Alex. Eng. J. 68, 817\u2013840 (2023)","journal-title":"Alex. Eng. J."},{"issue":"8","key":"3501_CR14","doi-asserted-by":"publisher","first-page":"5619","DOI":"10.1109\/TII.2022.3141400","volume":"18","author":"C Bisogni","year":"2022","unstructured":"Bisogni, C., Castiglione, A., Hossain, S., Narducci, F., Umer, S.: Impact of deep learning approaches on facial expression recognition in healthcare industries. IEEE Trans. Ind. Inf. 18(8), 5619\u20135627 (2022)","journal-title":"IEEE Trans. Ind. Inf."},{"issue":"14","key":"3501_CR15","doi-asserted-by":"publisher","DOI":"10.3788\/LOP57.141026","volume":"57","author":"X Yang","year":"2020","unstructured":"Yang, X., Shang, Z.: Facial expression recognition based on improved alexnet. Laser Optoelectron. Prog. 57(14), 141026 (2020)","journal-title":"Laser Optoelectron. Prog."},{"issue":"2","key":"3501_CR16","doi-asserted-by":"publisher","first-page":"439","DOI":"10.1007\/s40031-021-00681-8","volume":"103","author":"JD Bodapati","year":"2022","unstructured":"Bodapati, J.D., Srilakshmi, U., Veeranjaneyulu, N.: Fernet: a deep cnn architecture for facial expression recognition in the wild. J. 
Inst. Eng. (India) Ser. B 103(2), 439\u2013448 (2022)","journal-title":"J. Inst. Eng. (India) Ser. B"},{"issue":"4","key":"3501_CR17","doi-asserted-by":"publisher","first-page":"2132","DOI":"10.1109\/TAFFC.2022.3188390","volume":"13","author":"AV Savchenko","year":"2022","unstructured":"Savchenko, A.V., Savchenko, L.V., Makarov, I.: Classifying emotions and engagement in online learning based on a single facial expression recognition neural network. IEEE Trans. Affect. Comput. 13(4), 2132\u20132143 (2022)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"3501_CR18","doi-asserted-by":"publisher","DOI":"10.1016\/j.asoc.2023.110530","volume":"145","author":"X Wu","year":"2023","unstructured":"Wu, X., He, J., Huang, Q., Huang, C., Zhu, J., Huang, X., Fujita, H.: Fer-chc: Facial expression recognition with cross-hierarchy contrast. Appl. Soft Comput. 145, 110530 (2023)","journal-title":"Appl. Soft Comput."},{"key":"3501_CR19","doi-asserted-by":"publisher","first-page":"9","DOI":"10.1016\/j.patrec.2022.01.013","volume":"155","author":"D Gera","year":"2022","unstructured":"Gera, D., Balasubramanian, S., Jami, A.: Cern: Compact facial expression recognition net. Pattern Recognit. Lett. 155, 9\u201318 (2022)","journal-title":"Pattern Recognit. Lett."},{"issue":"1","key":"3501_CR20","doi-asserted-by":"publisher","first-page":"61","DOI":"10.1109\/TNN.2008.2005605","volume":"20","author":"F Scarselli","year":"2008","unstructured":"Scarselli, F., Gori, M., Tsoi, A.C., Hagenbuchner, M., Monfardini, G.: The graph neural network model. IEEE Trans. Neural Netw. 20(1), 61\u201380 (2008)","journal-title":"IEEE Trans. Neural Netw."},{"key":"3501_CR21","doi-asserted-by":"publisher","first-page":"320","DOI":"10.1016\/j.neucom.2021.07.017","volume":"462","author":"Y Liu","year":"2021","unstructured":"Liu, Y., Zhang, X., Zhou, J., Fu, L.: Sg-dsn: a semantic graph-based dual-stream network for facial expression recognition. 
Neurocomputing 462, 320\u2013330 (2021)","journal-title":"Neurocomputing"},{"key":"3501_CR22","doi-asserted-by":"publisher","first-page":"7143","DOI":"10.1109\/TIP.2021.3101820","volume":"30","author":"X Jin","year":"2021","unstructured":"Jin, X., Lai, Z., Jin, Z.: Learning dynamic relationships for facial expression recognition based on graph convolutional network. IEEE Trans. Image Process. 30, 7143\u20137155 (2021)","journal-title":"IEEE Trans. Image Process."},{"issue":"7","key":"3501_CR23","volume":"35","author":"S Wang","year":"2023","unstructured":"Wang, S., Zhao, A., Lai, C., Zhang, Q., Li, D., Gao, Y., Dong, L., Wang, X.: Gcanet: Geometry cues-aware facial expression recognition based on graph convolutional networks. J. King Saud Univ. Comput. Inf. Sci. 35(7), 101605 (2023)","journal-title":"J. King Saud Univ. Comput. Inf. Sci."},{"key":"3501_CR24","unstructured":"Mnih, V., Heess, N., Graves, A., et al.: Recurrent models of visual attention. Advances in neural information processing systems, vol. 27 (2014)"},{"issue":"5","key":"3501_CR25","doi-asserted-by":"publisher","first-page":"2439","DOI":"10.1109\/TIP.2018.2886767","volume":"28","author":"Y Li","year":"2018","unstructured":"Li, Y., Zeng, J., Shan, S., Chen, X.: Occlusion aware facial expression recognition using cnn with attention mechanism. IEEE Trans. Image Process. 28(5), 2439\u20132450 (2018)","journal-title":"IEEE Trans. Image Process."},{"key":"3501_CR26","doi-asserted-by":"publisher","first-page":"4057","DOI":"10.1109\/TIP.2019.2956143","volume":"29","author":"K Wang","year":"2020","unstructured":"Wang, K., Peng, X., Yang, J., Meng, D., Qiao, Y.: Region attention networks for pose and occlusion robust facial expression recognition. IEEE Trans. Image Process. 29, 4057\u20134069 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"3501_CR27","unstructured":"Wen, Z., Lin, W., Wang, T., Xu, G.: Distract your attention: multi-head cross attention network for facial expression recognition. 
arxiv 2021. arXiv preprint arXiv:2109.07270"},{"key":"3501_CR28","doi-asserted-by":"crossref","unstructured":"Woo, S., Park, J., Lee, J.-Y., Kweon, I.S.: Cbam: Convolutional block attention module. In: Proceedings of the European Conference on Computer Vision, pp. 3\u201319 (2018)","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"3501_CR29","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"issue":"9","key":"3501_CR30","doi-asserted-by":"publisher","first-page":"607","DOI":"10.1016\/j.imavis.2011.07.002","volume":"29","author":"G Zhao","year":"2011","unstructured":"Zhao, G., Huang, X., Taini, M., Li, S.Z., Pietik\u00e4inen, M.: Facial expression recognition from near-infrared videos. Image Vis. Comput. 29(9), 607\u2013619 (2011)","journal-title":"Image Vis. Comput."},{"key":"3501_CR31","doi-asserted-by":"crossref","unstructured":"Ruan, D., Yan, Y., Chen, S., Xue, J.-H., Wang, H.: Deep disturbance-disentangled learning for facial expression recognition. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 2833\u20132841 (2020)","DOI":"10.1145\/3394171.3413907"},{"key":"3501_CR32","unstructured":"Kanade, T., Cohn, J.F., Tian, Y.: Comprehensive database for facial expression analysis. In: Proceedings Fourth IEEE International Conference on Automatic Face and Gesture Recognition (cat. No. PR00580), pp. 46\u201353 (2000)"},{"key":"3501_CR33","doi-asserted-by":"crossref","unstructured":"Goodfellow, I.J., Erhan, D., Carrier, P.L., Courville, A., Mirza, M., Hamner, B., Cukierski, W., Tang, Y., Thaler, D., Lee, D.-H., : Challenges in representation learning: A report on three machine learning contests. In: Neural Information Processing: 20th International Conference, ICONIP 2013, Daegu, Korea, November 3-7, 2013. 
Proceedings, Part III 20, pp. 117\u2013124 (2013)","DOI":"10.1007\/978-3-642-42051-1_16"},{"key":"3501_CR34","doi-asserted-by":"crossref","unstructured":"Li, S., Deng, W., Du, J.: Reliable crowdsourcing and deep locality-preserving learning for expression recognition in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2852\u20132861 (2017)","DOI":"10.1109\/CVPR.2017.277"},{"key":"3501_CR35","doi-asserted-by":"publisher","first-page":"177","DOI":"10.1016\/j.patcog.2019.03.019","volume":"92","author":"S Xie","year":"2019","unstructured":"Xie, S., Hu, H., Wu, Y.: Deep multi-path convolutional neural network joint with salient region attention for facial expression recognition. Pattern Recognit. 92, 177\u2013191 (2019)","journal-title":"Pattern Recognit."},{"key":"3501_CR36","doi-asserted-by":"crossref","unstructured":"Yang, H., Ciftci, U., Yin, L.: Facial expression recognition by de-expression residue learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2168\u20132177 (2018)","DOI":"10.1109\/CVPR.2018.00231"},{"key":"3501_CR37","doi-asserted-by":"crossref","unstructured":"Zhao, X., Liang, X., Liu, L., Li, T., Han, Y., Vasconcelos, N., Yan, S.: Peak-piloted deep network for facial expression recognition. In: Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pp. 425\u2013442 (2016)","DOI":"10.1007\/978-3-319-46475-6_27"},{"key":"3501_CR38","doi-asserted-by":"crossref","unstructured":"Ali, K., Hughes, C.E.: Facial expression recognition by using a disentangled identity-invariant expression representation. In: 2020 25th International Conference on Pattern Recognition, pp. 
9460\u20139467 (2021)","DOI":"10.1109\/ICPR48806.2021.9412172"},{"issue":"4","key":"3501_CR39","doi-asserted-by":"publisher","first-page":"1868","DOI":"10.1109\/TAFFC.2022.3197761","volume":"13","author":"J Jiang","year":"2022","unstructured":"Jiang, J., Deng, W.: Disentangling identity and pose for facial expression recognition. IEEE Trans. Affect. Comput. 13(4), 1868\u20131878 (2022)","journal-title":"IEEE Trans. Affect. Comput."},{"issue":"1","key":"3501_CR40","doi-asserted-by":"publisher","first-page":"800","DOI":"10.1109\/TAFFC.2020.3027340","volume":"14","author":"W Chen","year":"2020","unstructured":"Chen, W., Zhang, D., Li, M., Lee, D.-J.: Stcam: spatial-temporal and channel attention module for dynamic facial expression recognition. IEEE Trans. Affect. Comput. 14(1), 800\u2013810 (2020)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"3501_CR41","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/s11760-024-03113-4","volume":"2024","author":"M Sun","year":"2024","unstructured":"Sun, M., Yan, C.: Fgenet: a lightweight facial expression recognition algorithm based on fasternet. Signal Image Video Process. 2024, 1\u201318 (2024)","journal-title":"Signal Image Video Process."},{"key":"3501_CR42","unstructured":"Zhu, A., Li, K., Wu, T., Zhao, P., Zhou, W., Hong, B.: Cross-task multi-branch vision transformer for facial expression and mask wearing classification. arXiv preprint arXiv:2404.14606 (2024)"},{"key":"3501_CR43","doi-asserted-by":"crossref","unstructured":"Ruan, D., Yan, Y., Lai, S., Chai, Z., Shen, C., Wang, H.: Feature decomposition and reconstruction learning for effective facial expression recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
7660\u20137669 (2021)","DOI":"10.1109\/CVPR46437.2021.00757"},{"key":"3501_CR44","doi-asserted-by":"crossref","unstructured":"Ding, H., Zhou, S.K., Chellappa, R.: Facenet2expnet: Regularizing a deep face recognition net for expression recognition. In: 2017 12th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2017), pp. 118\u2013126 (2017). IEEE","DOI":"10.1109\/FG.2017.23"},{"key":"3501_CR45","doi-asserted-by":"crossref","unstructured":"Bai, M., Xie, W., Shen, L.: Disentangled feature based adversarial learning for facial expression recognition. In: 2019 IEEE International Conference on Image Processing, pp. 31\u201335 (2019)","DOI":"10.1109\/ICIP.2019.8802941"},{"key":"3501_CR46","doi-asserted-by":"publisher","first-page":"499","DOI":"10.1007\/s00371-019-01636-3","volume":"36","author":"D Liang","year":"2020","unstructured":"Liang, D., Liang, H., Yu, Z., Zhang, Y.: Deep convolutional bilstm fusion network for facial expression recognition. Vis. Comput. 36, 499\u2013508 (2020)","journal-title":"Vis. Comput."},{"key":"3501_CR47","doi-asserted-by":"publisher","first-page":"166","DOI":"10.1016\/j.patrec.2020.01.016","volume":"131","author":"M Yu","year":"2020","unstructured":"Yu, M., Zheng, H., Peng, Z., Dong, J., Du, H.: Facial expression recognition based on a multi-task global-local network. Pattern Recognit. Lett. 131, 166\u2013171 (2020)","journal-title":"Pattern Recognit. Lett."},{"key":"3501_CR48","doi-asserted-by":"crossref","unstructured":"Salunke, V.V., Patil, C.: A new approach for automatic face emotion recognition and classification based on deep networks. In: 2017 International Conference on Computing, Communication, Control and Automation (ICCUBEA), pp. 
1\u20135 (2017)","DOI":"10.1109\/ICCUBEA.2017.8463785"},{"key":"3501_CR49","doi-asserted-by":"publisher","first-page":"012100","DOI":"10.1088\/1742-6596\/1757\/1\/012100","volume":"1757","author":"R Lu","year":"2021","unstructured":"Lu, R., Li, Y., Yang, P., Zhang, W.: Facial expression recognition based on convolutional neural network. J. Phys. Conf. Ser. 1757, 012100 (2021)","journal-title":"J. Phys. Conf. Ser."},{"issue":"5","key":"3501_CR50","doi-asserted-by":"publisher","first-page":"2787","DOI":"10.1109\/TCYB.2019.2925095","volume":"51","author":"W Xie","year":"2019","unstructured":"Xie, W., Shen, L., Duan, J.: Adaptive weighting of handcrafted feature losses for facial expression recognition. IEEE Trans. Cybern. 51(5), 2787\u20132800 (2019)","journal-title":"IEEE Trans. Cybern."},{"key":"3501_CR51","doi-asserted-by":"publisher","first-page":"39255","DOI":"10.1109\/ACCESS.2021.3063493","volume":"9","author":"C Shi","year":"2021","unstructured":"Shi, C., Tan, C., Wang, L.: A facial expression recognition method based on a multibranch cross-connection convolutional neural network. IEEE Access 9, 39255\u201339274 (2021)","journal-title":"IEEE Access"},{"key":"3501_CR52","doi-asserted-by":"publisher","first-page":"1635","DOI":"10.1007\/s00371-019-01759-7","volume":"36","author":"X Liu","year":"2020","unstructured":"Liu, X., Zhou, F.: Improved curriculum learning using ssm for facial expression recognition. Vis. Comput. 36, 1635\u20131649 (2020)","journal-title":"Vis. Comput."},{"key":"3501_CR53","doi-asserted-by":"crossref","unstructured":"Weng, J., Yang, Y., Tan, Z., Lei, Z.: Attentive hybrid feature with two-step fusion for facial expression recognition. In: 2020 25th International Conference on Pattern Recognition, pp. 
6410\u20136416 (2021)","DOI":"10.1109\/ICPR48806.2021.9412554"},{"issue":"12","key":"3501_CR54","doi-asserted-by":"publisher","first-page":"12649","DOI":"10.1109\/TCYB.2021.3085744","volume":"52","author":"P Liu","year":"2021","unstructured":"Liu, P., Lin, Y., Meng, Z., Lu, L., Deng, W., Zhou, J.T., Yang, Y.: Point adversarial self-mining: a simple method for facial expression recognition. IEEE Trans. Cybern. 52(12), 12649\u201312660 (2021)","journal-title":"IEEE Trans. Cybern."},{"issue":"3","key":"3501_CR55","doi-asserted-by":"publisher","first-page":"1927","DOI":"10.1109\/TAFFC.2022.3156920","volume":"14","author":"J Cai","year":"2022","unstructured":"Cai, J., Meng, Z., Khan, A.S., Li, Z., O\u2019Reilly, J., Tong, Y.: Probabilistic attribute tree structured convolutional neural networks for facial expression recognition in the wild. IEEE Trans. Affect. Comput. 14(3), 1927\u20131941 (2022)","journal-title":"IEEE Trans. Affect. Comput."},{"key":"3501_CR56","doi-asserted-by":"publisher","first-page":"26756","DOI":"10.1109\/ACCESS.2022.3156598","volume":"10","author":"AP Fard","year":"2022","unstructured":"Fard, A.P., Mahoor, M.H.: Ad-corre: adaptive correlation-based loss for facial expression recognition in the wild. IEEE Access 10, 26756\u201326768 (2022)","journal-title":"IEEE Access"},{"key":"3501_CR57","doi-asserted-by":"publisher","first-page":"4057","DOI":"10.1109\/TIP.2019.2956143","volume":"29","author":"K Wang","year":"2020","unstructured":"Wang, K., Peng, X., Yang, J., Meng, D., Qiao, Y.: Region attention networks for pose and occlusion robust facial expression recognition. IEEE Trans. Image Process. 29, 4057\u20134069 (2020)","journal-title":"IEEE Trans. Image Process."},{"key":"3501_CR58","doi-asserted-by":"crossref","unstructured":"Wang, K., Peng, X., Yang, J., Lu, S., Qiao, Y.: Suppressing uncertainties for large-scale facial expression recognition. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6897\u20136906 (2020)","DOI":"10.1109\/CVPR42600.2020.00693"},{"key":"3501_CR59","doi-asserted-by":"publisher","first-page":"105","DOI":"10.1016\/j.patrec.2019.04.002","volume":"125","author":"Y Gan","year":"2019","unstructured":"Gan, Y., Chen, J., Xu, L.: Facial expression recognition boosted by soft label with a diverse ensemble. Pattern Recognit. Lett. 125, 105\u2013112 (2019)","journal-title":"Pattern Recognit. Lett."},{"key":"3501_CR60","doi-asserted-by":"crossref","unstructured":"Zhang, W., Ji, X., Chen, K., Ding, Y., Fan, C.: Learning a facial expression embedding disentangled from identity. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6759\u20136768 (2021)","DOI":"10.1109\/CVPR46437.2021.00669"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-024-03501-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-024-03501-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-024-03501-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,4]],"date-time":"2024-11-04T07:18:54Z","timestamp":1730704734000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-024-03501-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,8,14]]},"references-count":60,"journal-issue":{"issue":"12","published-print":{"date-parts":[[2024,12]]}},"alternative-id":["3501"],"URL":"https:\/\/doi.org\/10.1007\/s11760-024-03501-w","relation":{},"ISSN":["1863-1703","1863-1711"],
"issn-type":[{"value":"1863-1703","type":"print"},{"value":"1863-1711","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,8,14]]},"assertion":[{"value":"14 June 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 July 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 August 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 August 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}