{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,23]],"date-time":"2026-02-23T13:31:05Z","timestamp":1771853465499,"version":"3.50.1"},"reference-count":45,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2024,5,7]],"date-time":"2024-05-07T00:00:00Z","timestamp":1715040000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,5,7]],"date-time":"2024-05-07T00:00:00Z","timestamp":1715040000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["No.61573266"],"award-info":[{"award-number":["No.61573266"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Natural Science Basic Research Program of Shaanxi","award":["No.2021JM-133"],"award-info":[{"award-number":["No.2021JM-133"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,6]]},"DOI":"10.1007\/s00530-024-01345-5","type":"journal-article","created":{"date-parts":[[2024,5,7]],"date-time":"2024-05-07T20:12:54Z","timestamp":1715112774000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":13,"title":["A Three-stage multimodal emotion recognition network based on text low-rank fusion"],"prefix":"10.1007","volume":"30","author":[{"given":"Linlin","family":"Zhao","sequence":"first","affiliation":[]},{"given":"Youlong","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Tong","family":"Ning","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,5,7]]},"reference":[{"key":"1345_CR1","doi-asserted-by":"publisher","first-page":"109978","DOI":"10.1016\/j.knosys.2022.109978","volume":"258","author":"S Zou","year":"2022","unstructured":"Zou, S., Huang, X., Shen, X., Liu, H.: Improving multimodal fusion with main modal transformer for emotion recognition in conversation. Knowl. Based Syst. 258, 109978 (2022)","journal-title":"Knowl. Based Syst."},{"issue":"6","key":"1345_CR2","doi-asserted-by":"publisher","first-page":"59","DOI":"10.1109\/MSP.2021.3106895","volume":"38","author":"S Zhao","year":"2021","unstructured":"Zhao, S., Jia, G., Yang, J., Ding, G., Keutzer, K.: Emotion recognition from multiple modalities: fundamentals and methodologies. IEEE Signal Process. Mag. 38(6), 59\u201373 (2021). https:\/\/doi.org\/10.1109\/MSP.2021.3106895","journal-title":"IEEE Signal Process. Mag."},{"key":"1345_CR3","doi-asserted-by":"publisher","first-page":"117327","DOI":"10.1109\/ACCESS.2019.2936124","volume":"7","author":"RA Khalil","year":"2019","unstructured":"Khalil, R.A., Jones, E., Babar, M.I., Jan, T., Zafar, M.H., Alhussain, T.: Speech emotion recognition using deep learning techniques: a review. IEEE Access 7, 117327\u2013117345 (2019). https:\/\/doi.org\/10.1109\/ACCESS.2019.2936124","journal-title":"IEEE Access"},{"key":"1345_CR4","doi-asserted-by":"crossref","unstructured":"Zhao, Z., Liu, Q., Zhou, F.: Robust lightweightrobust facial expression recognition network with label distribution training. In: AAAI Conference on Artificial Intelligence (2021). https:\/\/api.semanticscholar.org\/CorpusID:235306283","DOI":"10.1609\/aaai.v35i4.16465"},{"key":"1345_CR5","doi-asserted-by":"publisher","first-page":"108078","DOI":"10.1016\/j.cie.2022.108078","volume":"168","author":"J Zhang","year":"2022","unstructured":"Zhang, J., Xing, L., Tan, Z., Wang, H., Wang, K.: Multi-head attention fusion networks for multi-modal speech emotion recognition. Comput. Ind. Eng. 168, 108078 (2022)","journal-title":"Comput. Ind. Eng."},{"issue":"4","key":"1345_CR6","doi-asserted-by":"publisher","first-page":"1052","DOI":"10.1109\/TVT.2004.830974","volume":"53","author":"Q Ji","year":"2004","unstructured":"Ji, Q., Zhu, Z., Lan, P.: Real-time nonintrusive monitoring and prediction of driver fatigue. IEEE Trans. Veh. Technol. 53(4), 1052\u20131068 (2004). https:\/\/doi.org\/10.1109\/TVT.2004.830974","journal-title":"IEEE Trans. Veh. Technol."},{"key":"1345_CR7","doi-asserted-by":"crossref","unstructured":"Huang, C., Zaiane, O.R., Trabelsi, A., Dziri, N.: Automatic dialogue generation with expressed emotions. In: North American Chapter of the Association for Computational Linguistics (2018). https:\/\/api.semanticscholar.org\/CorpusID:13788863","DOI":"10.18653\/v1\/N18-2008"},{"key":"1345_CR8","doi-asserted-by":"crossref","unstructured":"Busso, C., Bulut, M., Narayanan, S.S.: Toward effective automatic recognition systems of emotion in speech. (2014). https:\/\/api.semanticscholar.org\/CorpusID:31805666","DOI":"10.1093\/acprof:oso\/9780195387643.003.0008"},{"key":"1345_CR9","doi-asserted-by":"publisher","first-page":"679","DOI":"10.1016\/j.ins.2022.11.076","volume":"619","author":"S Liu","year":"2022","unstructured":"Liu, S., Gao, P., Li, Y., Fu, W., Ding, W.: Multi-modal fusion network with complementarity and importance for emotion recognition. Inf. Sci. 619, 679\u2013694 (2022)","journal-title":"Inf. Sci."},{"key":"1345_CR10","doi-asserted-by":"publisher","first-page":"124","DOI":"10.1016\/j.knosys.2018.07.041","volume":"161","author":"N Majumder","year":"2018","unstructured":"Majumder, N., Hazarika, D., Gelbukh, A., Cambria, E., Poria, S.: Multimodal sentiment analysis using hierarchical fusion with context modeling. Knowl. Based Syst. 161, 124\u2013133 (2018)","journal-title":"Knowl. Based Syst."},{"key":"1345_CR11","doi-asserted-by":"publisher","first-page":"126623","DOI":"10.1016\/j.neucom.2023.126623","volume":"555","author":"C Gan","year":"2023","unstructured":"Gan, C., Wang, K., Zhu, Q., Xiang, Y., Jain, D.K., Garc\u00eda, S.: Speech emotion recognition via multiple fusion under spatial-temporal parallel network. Neurocomputing 555, 126623 (2023)","journal-title":"Neurocomputing"},{"key":"1345_CR12","doi-asserted-by":"crossref","unstructured":"Cheng, J., Dong, L., Lapata, M.: Long short-term memory-networks for machine reading. ArXiv arXiv:abs\/1601.06733 (2016)","DOI":"10.18653\/v1\/D16-1053"},{"key":"1345_CR13","doi-asserted-by":"crossref","unstructured":"Dai, W., Cahyawijaya, S., Liu, Z., Fung, P.: Multimodal end-to-end sparse model for emotion recognition. ArXiv arXiv:abs\/2103.09666 (2021)","DOI":"10.18653\/v1\/2021.naacl-main.417"},{"key":"1345_CR14","doi-asserted-by":"publisher","first-page":"2278","DOI":"10.1109\/5.726791","volume":"86","author":"Y LeCun","year":"1998","unstructured":"LeCun, Y., Bottou, L., Bengio, Y., Haffner, P.: Gradient-based learning applied to document recognition. Proc. IEEE 86, 2278\u20132324 (1998)","journal-title":"Proc. IEEE"},{"key":"1345_CR15","doi-asserted-by":"crossref","unstructured":"Chumachenko, K., Iosifidis, A., Gabbouj, M.: Self-attention fusion for audiovisual emotion recognition with incomplete data. 2022 26th International Conference on Pattern Recognition (ICPR), 2822\u20132828 (2022)","DOI":"10.1109\/ICPR56361.2022.9956592"},{"key":"1345_CR16","doi-asserted-by":"crossref","unstructured":"Liu, Z., Shen, Y., Lakshminarasimhan, V.B., Liang, P.P., Zadeh, A., Morency, L.-P.: Efficient low-rank multimodal fusion with modality-specific factors. In: Annual Meeting of the Association for Computational Linguistics (2018). https:\/\/api.semanticscholar.org\/CorpusID:44131945","DOI":"10.18653\/v1\/P18-1209"},{"key":"1345_CR17","doi-asserted-by":"crossref","unstructured":"Zadeh, A., Chen, M., Poria, S., Cambria, E., Morency, L.-P.: Tensor fusion network for multimodal sentiment analysis. In: Conference on Empirical Methods in Natural Language Processing (2017). https:\/\/api.semanticscholar.org\/CorpusID:950292","DOI":"10.18653\/v1\/D17-1115"},{"issue":"3","key":"1345_CR18","doi-asserted-by":"publisher","first-page":"300","DOI":"10.1109\/TAFFC.2016.2553038","volume":"8","author":"S Zhalehpour","year":"2017","unstructured":"Zhalehpour, S., Onder, O., Akhtar, Z., Erdem, C.E.: Baum-1: a spontaneous audio-visual face database of affective and mental states. IEEE Trans. Affect. Comput. 8(3), 300\u2013313 (2017). https:\/\/doi.org\/10.1109\/TAFFC.2016.2553038","journal-title":"IEEE Trans. Affect. Comput."},{"key":"1345_CR19","doi-asserted-by":"publisher","unstructured":"Dhall, A., Goecke, R., Lucey, S., Gedeon, T.: Static facial expression analysis in tough conditions: data, evaluation protocol and benchmark. In: 2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops), pp. 2106\u20132112 (2011). https:\/\/doi.org\/10.1109\/ICCVW.2011.6130508","DOI":"10.1109\/ICCVW.2011.6130508"},{"issue":"1","key":"1345_CR20","doi-asserted-by":"publisher","first-page":"5","DOI":"10.1109\/T-AFFC.2011.20","volume":"3","author":"G McKeown","year":"2012","unstructured":"McKeown, G., Valstar, M., Cowie, R., Pantic, M., Schroder, M.: The semaine database: annotated multimodal records of emotionally colored conversations between a person and a limited agent. IEEE Trans. Affect. Comput. 3(1), 5\u201317 (2012). https:\/\/doi.org\/10.1109\/T-AFFC.2011.20","journal-title":"IEEE Trans. Affect. Comput."},{"key":"1345_CR21","unstructured":"Zadeh, A., Zellers, R., Pincus, E., Morency, L.-P.: Mosi: Multimodal corpus of sentiment intensity and subjectivity analysis in online opinion videos. ArXiv arXiv:abs\/1606.06259 (2016)"},{"key":"1345_CR22","unstructured":"Zadeh, A., Liang, P.P., Poria, S., Cambria, E., Morency, L.-P.: Multimodal language analysis in the wild: Cmu-mosei dataset and interpretable dynamic fusion graph. In: Annual Meeting of the Association for Computational Linguistics (2018). https:\/\/api.semanticscholar.org\/CorpusID:51868869"},{"key":"1345_CR23","doi-asserted-by":"publisher","first-page":"335","DOI":"10.1007\/s10579-008-9076-6","volume":"42","author":"C Busso","year":"2008","unstructured":"Busso, C., Bulut, M., Lee, C.-C., Kazemzadeh, E.A., Provost, E.M., Kim, S., Chang, J.N., Lee, S., Narayanan, S.S.: Iemocap: interactive emotional dyadic motion capture database. Lang. Resour. Eval. 42, 335\u2013359 (2008)","journal-title":"Lang. Resour. Eval."},{"key":"1345_CR24","doi-asserted-by":"crossref","unstructured":"Poria, S., Hazarika, D., Majumder, N., Naik, G., Cambria, E., Mihalcea, R.: Meld: A multimodal multi-party dataset for emotion recognition in conversations. ArXiv arXiv:abs\/1810.02508 (2018)","DOI":"10.18653\/v1\/P19-1050"},{"key":"1345_CR25","doi-asserted-by":"publisher","unstructured":"Ringeval, F., Sonderegger, A., Sauer, J., Lalanne, D.: Introducing the recola multimodal corpus of remote collaborative and affective interactions. In: 2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition (FG), pp. 1\u20138 (2013). https:\/\/doi.org\/10.1109\/FG.2013.6553805","DOI":"10.1109\/FG.2013.6553805"},{"key":"1345_CR26","doi-asserted-by":"crossref","unstructured":"Schmidt, P., Reiss, A., D\u00fcrichen, R., Marberger, C., Laerhoven, K.V.: Introducing wesad, a multimodal dataset for wearable stress and affect detection. Proceedings of the 20th ACM International Conference on Multimodal Interaction (2018)","DOI":"10.1145\/3242969.3242985"},{"key":"1345_CR27","doi-asserted-by":"crossref","unstructured":"Eyben, F., Wllmer, M., Schuller, B.: Opensmile: the munich versatile and fast open-source audio feature extractor. In: Acm International Conference on Multimedia (2010)","DOI":"10.1145\/1873951.1874246"},{"key":"1345_CR28","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Weinberger, K.Q.: Densely connected convolutional networks. 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2261\u20132269 (2016)","DOI":"10.1109\/CVPR.2017.243"},{"key":"1345_CR29","unstructured":"Tan, M., Le, Q.V.: Efficientnet: Rethinking model scaling for convolutional neural networks. ArXiv arXiv:abs\/1905.11946 (2019)"},{"key":"1345_CR30","unstructured":"Devlin, J., Chang, M.-W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. In: North American Chapter of the Association for Computational Linguistics (2019). https:\/\/api.semanticscholar.org\/CorpusID:52967399"},{"key":"1345_CR31","doi-asserted-by":"publisher","first-page":"42","DOI":"10.1016\/j.neucom.2020.01.048","volume":"391","author":"M Hao","year":"2020","unstructured":"Hao, M., Cao, W., Liu, Z., Wu, M., Xiao, P.: Visual-audio emotion recognition based on multi-task and ensemble learning with multiple features. Neurocomputing 391, 42\u201351 (2020)","journal-title":"Neurocomputing"},{"key":"1345_CR32","doi-asserted-by":"publisher","first-page":"27","DOI":"10.1016\/j.neucom.2018.03.068","volume":"309","author":"J Yan","year":"2018","unstructured":"Yan, J., Zheng, W., Cui, Z., Tang, C., Zhang, T., Zong, Y.: Multi-cue fusion for emotion recognition in the wild. Neurocomputing 309, 27\u201335 (2018)","journal-title":"Neurocomputing"},{"key":"1345_CR33","unstructured":"Zaremba, W., Sutskever, I., Vinyals, O.: Recurrent neural network regularization. ArXiv arXiv:abs\/1409.2329 (2014)"},{"key":"1345_CR34","unstructured":"Cambria, E., Hazarika, D., Poria, S., Hussain, A., Subramanyam, R.B.V.: Benchmarking multimodal sentiment analysis. ArXiv arXiv:abs\/1707.09538 (2017)"},{"key":"1345_CR35","doi-asserted-by":"crossref","unstructured":"Tsai, Y.-H.H., Bai, S., Liang, P.P., Kolter, J.Z., Morency, L.-P., Salakhutdinov, R.: Multimodal transformer for unaligned multimodal language sequences. Proceedings of the conference. Association for Computational Linguistics. Meeting 2019, 6558\u20136569 (2019)","DOI":"10.18653\/v1\/P19-1656"},{"key":"1345_CR36","doi-asserted-by":"publisher","first-page":"296","DOI":"10.1016\/j.inffus.2022.07.006","volume":"88","author":"F Zhang","year":"2022","unstructured":"Zhang, F., Li, X.-C., Lim, C.P., Hua, Q., Dong, C.-R., Zhai, J.-H.: Deep emotional arousal network for multimodal sentiment analysis and emotion recognition. Inf. Fusion 88, 296\u2013304 (2022)","journal-title":"Inf. Fusion"},{"key":"1345_CR37","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3338769","author":"R Huan","year":"2023","unstructured":"Huan, R., Zhong, G., Chen, P., Liang, R.: Unimf: a unified multimodal framework for multimodal sentiment analysis in missing modalities and unaligned multimodal sequences. IEEE Trans. Multimed. (2023). https:\/\/doi.org\/10.1109\/TMM.2023.3338769","journal-title":"IEEE Trans. Multimed."},{"key":"1345_CR38","doi-asserted-by":"publisher","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"1345_CR39","unstructured":"Vaswani, A., Shazeer, N.M., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. In: Neural Information Processing Systems (2017). https:\/\/api.semanticscholar.org\/CorpusID:13756489"},{"key":"1345_CR40","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Albanie, S., Sun, G., Wu, E.: Squeeze-and-excitation networks. 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 7132\u20137141 (2017)","DOI":"10.1109\/CVPR.2018.00745"},{"key":"1345_CR41","doi-asserted-by":"crossref","unstructured":"Sun, Z., Sarma, P.K., Sethares, W.A., Liang, Y.: Learning relationships between text, audio, and video via deep canonical correlation for multimodal language analysis. In: AAAI Conference on Artificial Intelligence (2019). https:\/\/api.semanticscholar.org\/CorpusID:207930647","DOI":"10.1609\/aaai.v34i05.6431"},{"key":"1345_CR42","doi-asserted-by":"crossref","unstructured":"Wang, X., Girshick, R.B., Gupta, A.K., He, K.: Non-local neural networks. 2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 7794\u20137803 (2017)","DOI":"10.1109\/CVPR.2018.00813"},{"key":"1345_CR43","doi-asserted-by":"crossref","unstructured":"Williams, J., Kleinegesse, S., Comanescu, R., Radu, O.: Recognizing emotions in video using multimodal dnn feature fusion. (2018). https:\/\/api.semanticscholar.org\/CorpusID:52000158","DOI":"10.18653\/v1\/W18-3302"},{"key":"1345_CR44","doi-asserted-by":"crossref","unstructured":"Zadeh, A., Liang, P.P., Mazumder, N., Poria, S., Cambria, E., Morency, L.-P.: Memory fusion network for multi-view sequential learning. ArXiv arXiv:abs\/1802.00927 (2018)","DOI":"10.1609\/aaai.v32i1.12021"},{"key":"1345_CR45","doi-asserted-by":"publisher","unstructured":"Lv, F., Chen, X., Huang, Y., Duan, L., Lin, G.: Progressive modality reinforcement for human multimodal emotion recognition from unaligned multimodal sequences. In: 2021 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2554\u20132562 (2021). https:\/\/doi.org\/10.1109\/CVPR46437.2021.00258","DOI":"10.1109\/CVPR46437.2021.00258"}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01345-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01345-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01345-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,5]],"date-time":"2024-07-05T17:17:30Z","timestamp":1720199850000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01345-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,5,7]]},"references-count":45,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2024,6]]}},"alternative-id":["1345"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01345-5","relation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,5,7]]},"assertion":[{"value":"27 February 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 April 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 May 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"We declare that we do not have any commercial or associative interest that represents a Conflict of interest in connection with the work submitted.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"142"}}