{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,10]],"date-time":"2026-02-10T05:01:01Z","timestamp":1770699661996,"version":"3.49.0"},"publisher-location":"Singapore","reference-count":27,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819569564","type":"print"},{"value":"9789819569571","type":"electronic"}],"license":[{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,1]],"date-time":"2026-01-01T00:00:00Z","timestamp":1767225600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-6957-1_40","type":"book-chapter","created":{"date-parts":[[2026,2,9]],"date-time":"2026-02-09T10:44:15Z","timestamp":1770633855000},"page":"561-574","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["MCN-CL: Multimodal Cross-Attention Network and\u00a0Contrastive Learning for\u00a0Multimodal Emotion Recognition"],"prefix":"10.1007","author":[{"given":"Feng","family":"Li","sequence":"first","affiliation":[]},{"given":"Ke","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Yongwei","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,2,10]]},"reference":[{"key":"40_CR1","doi-asserted-by":"crossref","unstructured":"Li, F., Liu, R., Wang, B.: MIMCL: Multilayer interaction module with contrastive learning for speech emotion recognition, In: International Conference on Multimedia Retrieval, pp. 724\u2013732 (2025)","DOI":"10.1145\/3731715.3733388"},{"key":"40_CR2","doi-asserted-by":"crossref","unstructured":"Erkantarci, B., Bakal, G.: An empirical study of sentiment analysis utilizing machine learning and deep learning algorithms, J. Comput. Soc. Sci. 1\u201317 (2023)","DOI":"10.1007\/s42001-023-00236-5"},{"key":"40_CR3","doi-asserted-by":"crossref","unstructured":"Li, F., Luo, J., Xia, W.: Wavfusion: towards wav2vec 2.0 multimodal speech emotion recognition, In: International Conference on Multimedia Modeling, Springer, pp. 325\u2013336 (2025)","DOI":"10.1007\/978-981-96-2071-5_24"},{"key":"40_CR4","doi-asserted-by":"crossref","unstructured":"Park, S., Shim, H.S., Chatterjee, M., Sagae, K., philippe Morency, L.: Multimodal analysis and prediction of persuasiveness in online social multimedia, ACM Trans. Interactive Intell. Syst. (TiiS) 6, 1 \u2013 25 (2016)","DOI":"10.1145\/2897739"},{"key":"40_CR5","doi-asserted-by":"crossref","unstructured":"Le, P.N., Epps, J., Choi, E.H.C., Ambikairajah, E.: A study of voice source and vocal tract filter based features in cognitive load classification, In: 2010 20th International Conference on Pattern Recognition, pp. 4516\u20134519 (2010)","DOI":"10.1109\/ICPR.2010.1097"},{"key":"40_CR6","doi-asserted-by":"crossref","unstructured":"Ma, H., Wang, J., Lin, H., Pan, X., Zhang, Y., Yang, Z.: A multi-view network for real-time emotion recognition in conversations . Knowl. Based Syst. 
236, 107751 (2021)","DOI":"10.1016\/j.knosys.2021.107751"},{"key":"40_CR7","doi-asserted-by":"crossref","unstructured":"Li, Z., Tang, F., Zhao, M., Zhu, Y.: Emocaps: emotion capsule based model for conversational emotion recognition, In: Findings (2022)","DOI":"10.18653\/v1\/2022.findings-acl.126"},{"key":"40_CR8","unstructured":"Cambria, E., Hazarika, D., Poria, S., Hussain, A., Subramanyam, R.B.V.: Benchmarking multimodal sentiment analysis, ArXiv abs\/1707.09538 (2017)"},{"key":"40_CR9","doi-asserted-by":"crossref","unstructured":"Wang, H., Li, X., Ren, Z., Wang, M., Ma, C.: Multimodal sentiment analysis representations learning via contrastive learning with condense attention fusion, Sensors (Basel, Switzerland) 23 (2023)","DOI":"10.3390\/s23052679"},{"key":"40_CR10","doi-asserted-by":"crossref","unstructured":"Hu, D., Hou, X., Wei, L., Jiang, L.-X., Mo, Y.: Mm-DFN: multimodal dynamic fusion network for emotion recognition in conversations, IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 7037\u20137041 (2022)","DOI":"10.1109\/ICASSP43922.2022.9747397"},{"key":"40_CR11","doi-asserted-by":"crossref","unstructured":"Ghosh, S., Anwar, T.: Depression intensity estimation via social media: a deep learning approach . IEEE Trans. Comput. Soc. Syst. 8, 1465\u20131474 (2021)","DOI":"10.1109\/TCSS.2021.3084154"},{"key":"40_CR12","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition, In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2022)","DOI":"10.1109\/CVPR.2016.90"},{"key":"40_CR13","doi-asserted-by":"crossref","unstructured":"Shi, T., Huang, S.-L.: Multiemo: an attention-based correlation-aware multimodal fusion framework for emotion recognition in conversations, In: Annual Meeting of the Association for Computational Linguistics (2023)","DOI":"10.18653\/v1\/2023.acl-long.824"},{"key":"40_CR14","doi-asserted-by":"crossref","unstructured":"Yun, T., Lim, H., Lee, J., Song, M.: Telme: teacher-leading multimodal fusion network for emotion recognition in conversation, In: North American Chapter of the Association for Computational Linguistics (2024)","DOI":"10.18653\/v1\/2024.naacl-long.5"},{"key":"40_CR15","doi-asserted-by":"crossref","unstructured":"Poria, S., et al.: Context-dependent sentiment analysis in user-generated videos, In: Annual Meeting of the Association for Computational Linguistics (2017)","DOI":"10.18653\/v1\/P17-1081"},{"key":"40_CR16","doi-asserted-by":"crossref","unstructured":"Majumder, N.., Poria, S., Hazarika, D., Mihalcea, R., Gelbukh, A., Cambria, E.: Dialoguernn: an attentive RNN for emotion detection in conversations, In: AAAI Conference on Artificial Intelligence (2018)","DOI":"10.1609\/aaai.v33i01.33016818"},{"key":"40_CR17","doi-asserted-by":"crossref","unstructured":"Ghosal, D., Majumder, N., Poria, S., Chhaya, N., Gelbukh, A.: Dialoguegcn: a graph convolutional neural network for emotion recognition in conversation, In: Conference on Empirical Methods in Natural Language Processing (2019)","DOI":"10.18653\/v1\/D19-1015"},{"key":"40_CR18","doi-asserted-by":"crossref","unstructured":"Hu, J., Liu, Y., Zhao, J., Jin, Q.: Mmgcn: multimodal fusion via deep graph convolution network for emotion recognition in conversation, arXiv:2107.06779 (2021)","DOI":"10.18653\/v1\/2021.acl-long.440"},{"key":"40_CR19","doi-asserted-by":"crossref","unstructured":"Li, J., Wang, X., Lv, G., Zeng, Z.: Ga2mif: Graph and attention based 
two-stage multi-source information fusion for conversational emotion detection . IEEE Trans. Affect. Comput. 15, 130\u2013143 (2022)","DOI":"10.1109\/TAFFC.2023.3261279"},{"key":"40_CR20","doi-asserted-by":"crossref","unstructured":"Poria, S., Hazarika, D., Majumder, N., Naik, G., Cambria, E., Mihalcea, R.: Meld: a multimodal multi-party dataset for emotion recognition in conversations, ArXiv abs\/1810.02508 (2018)","DOI":"10.18653\/v1\/P19-1050"},{"key":"40_CR21","doi-asserted-by":"crossref","unstructured":"Poria, S., Hazarika, D., Majumder, N., Naik, G., Cambria, E., Mihalcea, R.: Meld: a multimodal multi-party dataset for emotion recognition in conversations, ArXiv abs\/1810.02508 (2018)","DOI":"10.18653\/v1\/P19-1050"},{"key":"40_CR22","doi-asserted-by":"crossref","unstructured":"Lu, X., Zhao, Y., Wu, Y., Tian, Y., Chen, H., Qin, B.: An iterative emotion interaction network for emotion recognition in conversations, In: International Conference on Computational Linguistics (2020)","DOI":"10.18653\/v1\/2020.coling-main.360"},{"key":"40_CR23","doi-asserted-by":"crossref","unstructured":"Liang, T., Lin, G., Feng, L., Zhang, Y., Lv, F.: Attention is not enough: mitigating the distribution discrepancy in asynchronous multimodal sequence fusion. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 8148\u20138156 (2021)","DOI":"10.1109\/ICCV48922.2021.00804"},{"key":"40_CR24","doi-asserted-by":"crossref","unstructured":"Lv, F., Chen, X., Huang, Y., Duan, L., Lin, G.: Progressive modality reinforcement for human multimodal emotion recognition from unaligned multimodal sequences. In : Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2554\u20132562 (2021)","DOI":"10.1109\/CVPR46437.2021.00258"},{"key":"40_CR25","doi-asserted-by":"crossref","unstructured":"Chen, G., Liu, S., Bi, X., Chen, Q., Meng, T.: Crgmr: a contextualized RGAT and graphtransformer method for multimodal emotion recognition, In: 2024 43rd Chinese Control Conference (CCC), pp. 8286\u20138291 (2024)","DOI":"10.23919\/CCC63176.2024.10661855"},{"key":"40_CR26","doi-asserted-by":"crossref","unstructured":"Hu, G., Lin, T.-E., Zhao, Y., Lu, G., Wu, Y., Li, Y.: Unimse: towards unified multimodal sentiment analysis and emotion recognition, In: Conference on Empirical Methods in Natural Language Processing (2022)","DOI":"10.18653\/v1\/2022.emnlp-main.534"},{"key":"40_CR27","doi-asserted-by":"crossref","unstructured":"Ren, M., Huang, X., Liu, J., Liu, M., Li, X., Liu, A.: Maln: multimodal adversarial learning network for conversational emotion recognition, IEEE Trans. Circuits Syst. Video Technol. 
33, 6965\u20136980 (2023)","DOI":"10.1109\/TCSVT.2023.3273577"}],"container-title":["Lecture Notes in Computer Science","MultiMedia Modeling"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-6957-1_40","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,9]],"date-time":"2026-02-09T10:44:21Z","timestamp":1770633861000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-6957-1_40"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026]]},"ISBN":["9789819569564","9789819569571"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-6957-1_40","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026]]},"assertion":[{"value":"10 February 2026","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"MMM","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Multimedia Modeling","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Prague","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Czech Republic","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2026","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 January 2026","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"31 January 2026","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"32","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"mmm2026","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/mmm2026.cz\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
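
The record above is a Crossref REST API "work" response: the envelope carries "status" and "message-type", and the chapter metadata (title, authors, pages, deposited references, conference assertions) sits under "message". A minimal Python sketch of fetching and walking such a record follows; the endpoint pattern is the public api.crossref.org works route, while the script name and contact address in the User-Agent header are placeholders you would replace with your own (per Crossref's "polite pool" convention).

# Fetch the same Crossref work record over the REST API and print a few fields.
# Assumptions: the `requests` package is installed; the User-Agent value is a
# placeholder, not a real contact address.
import requests

DOI = "10.1007/978-981-95-6957-1_40"
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    headers={"User-Agent": "metadata-check/0.1 (mailto:you@example.org)"},
    timeout=30,
)
resp.raise_for_status()
body = resp.json()
assert body["status"] == "ok" and body["message-type"] == "work"

work = body["message"]                 # the chapter metadata shown above
print(work["title"][0])                # "title" is a list of strings
print(work["DOI"], "pp.", work["page"])
for ref in work.get("reference", []):  # deposited reference list
    # "unstructured" holds the raw citation string; "DOI" may be absent.
    print(ref["key"], ref.get("DOI", "-"))

Reading through the "message" envelope rather than the top-level object keeps the script working for other message types too, since Crossref uses the same status/message wrapper across its endpoints.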