{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T22:05:34Z","timestamp":1772834734614,"version":"3.50.1"},"publisher-location":"Singapore","reference-count":15,"publisher":"Springer Nature Singapore","isbn-type":[{"value":"9789819543830","type":"print"},{"value":"9789819543847","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,11,6]],"date-time":"2025-11-06T00:00:00Z","timestamp":1762387200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,11,6]],"date-time":"2025-11-06T00:00:00Z","timestamp":1762387200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-981-95-4384-7_11","type":"book-chapter","created":{"date-parts":[[2025,11,5]],"date-time":"2025-11-05T15:42:45Z","timestamp":1762357365000},"page":"142-157","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Enhancing Multimodal Emotion Recognition via\u00a0Multi-feature Encoding and\u00a0Attention-Based Fusion"],"prefix":"10.1007","author":[{"given":"Xu","family":"Lin","sequence":"first","affiliation":[]},{"given":"Ke","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Hui","family":"Kang","sequence":"additional","affiliation":[]},{"given":"Xinying","family":"Wang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,11,6]]},"reference":[{"key":"11_CR1","doi-asserted-by":"publisher","unstructured":"Pang, J.-H., Hou, Z.-P., Li, Z.-N., et al.: A survey of multimodal emotion recognition research. J. Intell. Syst. 15(4), 633\u2013645 (2020). https:\/\/doi.org\/10.11992\/tis.202001032","DOI":"10.11992\/tis.202001032"},{"key":"11_CR2","doi-asserted-by":"crossref","unstructured":"Salas-C\u00e1ceres, J., Lorenzo-Navarro, J., Freire-Obreg\u00f3n, D. et al. Multimodal emotion recognition based on a fusion of audiovisual information with temporal dynamics. Multimed. Tools Appl. (2024). https:\/\/doi.org\/10.1007\/s11042-024-20227-6","DOI":"10.1007\/s11042-024-20227-6"},{"key":"11_CR3","doi-asserted-by":"publisher","unstructured":"Kova\u010devi\u0107, N., Holz, C., Gross, M., Wampfler, R.: On Multimodal emotion recognition for human-chatbot interaction in the wild. In: Proceedings International Conference on Multimodal Interaction (ICMI 2024), pp. 1\u201312. ACM, San Jose (2024). https:\/\/doi.org\/10.1145\/3678957.3685759","DOI":"10.1145\/3678957.3685759"},{"key":"11_CR4","unstructured":"Li, Y., Sun, Q., Murthy, S.M.K., Alturki, E., Schuller, B.W.: GatedxLSTM: a multimodal affective computing approach for emotion recognition in conversations. arXiv preprint arXiv:2503.20919 (2025). https:\/\/arxiv.org\/abs\/2503.20919"},{"key":"11_CR5","doi-asserted-by":"crossref","unstructured":"Zhao, S., et al.: An end-to-end visual-audio attention network for emotion recognition in user-generated videos. In: Proceeding 34th AAAI Conference on Artificial Intelligence (AAAI 2020), pp. 303\u2013311. 
{"key":"11_CR6","doi-asserted-by":"crossref","unstructured":"Avro, S.B.H., Taher, T., Mamun, N.: EmoTech: a multi-modal speech emotion recognition using multi-source low-level information with hybrid recurrent network. arXiv preprint arXiv:2501.12674 (2025). https:\/\/arxiv.org\/abs\/2501.12674","DOI":"10.1109\/SPICSCON64195.2024.10941488"},{"key":"11_CR7","unstructured":"Dai, W., Zheng, D., Yu, F., Zhang, Y., Hou, Y.: A novel approach to multimodal emotion recognition: multimodal semantic information fusion. arXiv preprint arXiv:2502.08573 (2025). https:\/\/arxiv.org\/abs\/2502.08573"},{"key":"11_CR8","doi-asserted-by":"crossref","unstructured":"Tzinis, E., Potamianos, A.: Segment-based speech emotion recognition using recurrent neural networks. In: Proceeding 7th International Conference on Affective Computing and Intelligent Interaction (ACII), pp. 190\u2013195. IEEE, San Antonio (2017)","DOI":"10.1109\/ACII.2017.8273599"},{"key":"11_CR9","volume":"132","author":"B Mocanu","year":"2023","unstructured":"Mocanu, B., Tapu, R., Zaharia, T.: Multimodal emotion recognition using cross-modal audio-video fusion with attention and deep metric learning. Image Vis. Comput. 132, 104664 (2023)","journal-title":"Image Vis. Comput."},{"key":"11_CR10","doi-asserted-by":"crossref","unstructured":"Moon, E., Sagar, A.S.M.S., Kim, H.S.: Multimodal Daily-Life Emotional Recognition Using Heart Rate and Speech Data from Wearables. IEEE Access 12, 1\u201310 (2024). https:\/\/ieeexplore.ieee.org\/document\/10402679. Accessed: Jan. 27, 2025","DOI":"10.1109\/ACCESS.2024.3427111"},{"key":"11_CR11","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.: ImageNet Classification with Deep Convolutional Neural Networks. In: Advanced Neural Information Processing System (NeurIPS), vol. 25, pp. 1097\u20131105. Curran Associates Inc., Red Hook (2012)"},{"key":"11_CR12","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Liu, W., Jia, Y., et al.: Going Deeper with Convolutions. In: Proceeding IEEE Conference Computing Vis. Pattern Recognition (CVPR), pp. 1\u20139. IEEE, Boston (2015)","DOI":"10.1109\/CVPR.2015.7298594"},{"key":"11_CR13","doi-asserted-by":"publisher","first-page":"35221","DOI":"10.1007\/s11042-023-16933-2","volume":"83","author":"A Pradhan","year":"2024","unstructured":"Pradhan, A., Srivastava, S.: Hybrid densenet with long short-term memory model for multi-modal emotion recognition from physiological signals. Multimed. Tools Appl. 83, 35221\u201335251 (2024). https:\/\/doi.org\/10.1007\/s11042-023-16933-2","journal-title":"Multimed. Tools Appl."},{"key":"11_CR14","unstructured":"Shayaninasab, M., Babaali, B.: Multi-modal emotion recognition by text, speech and video using pretrained transformers. arXiv preprint arXiv:2402.07327 (2024). https:\/\/arxiv.org\/abs\/2402.07327"},{"key":"11_CR15","unstructured":"Zaidi, S.A.M., Latif, S., Qadir, J.: Cross-Language speech emotion recognition using multimodal dual attention transformers. arXiv preprint arXiv:2306.13804 (2023). https:\/\/arxiv.org\/abs\/2306.13804"}],
"container-title":["Lecture Notes in Computer Science","Neural Information Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-4384-7_11","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,6]],"date-time":"2026-03-06T14:08:10Z","timestamp":1772806090000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-4384-7_11"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,11,6]]},"ISBN":["9789819543830","9789819543847"],"references-count":15,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-4384-7_11","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,11,6]]},"assertion":[{"value":"6 November 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICONIP","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Neural Information Processing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Okinawa","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Japan","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"20 November 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"24 November 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"32","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"iconip2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/iconip2025.apnns.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
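
The record above has the shape of a standard Crossref REST API "work" message. The short Python sketch below shows how a record like this could be fetched and its main fields read; it assumes the public endpoint https://api.crossref.org/works/{DOI} and the requests package, and the User-Agent contact address and timeout are illustrative placeholders rather than anything taken from the record itself.

# Minimal sketch (assumptions as stated above): fetch this chapter's Crossref
# record and read a few of the fields that appear in the JSON above.
import requests

DOI = "10.1007/978-981-95-4384-7_11"
resp = requests.get(
    f"https://api.crossref.org/works/{DOI}",
    headers={"User-Agent": "metadata-check/0.1 (mailto:you@example.org)"},  # placeholder contact
    timeout=30,
)
resp.raise_for_status()
work = resp.json()["message"]           # payload with the same structure as the record above

print(work["title"][0])                 # chapter title
print(work["container-title"])          # ["Lecture Notes in Computer Science", "Neural Information Processing"]
print(work["page"], work["DOI"])        # "142-157" and the DOI
print(work["references-count"])         # 15
for ref in work.get("reference", []):   # reference keys 11_CR1 .. 11_CR15
    print(ref["key"], ref.get("DOI", "-"))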