{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:08:45Z","timestamp":1775156925020,"version":"3.50.1"},"reference-count":375,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,2,18]],"date-time":"2026-02-18T00:00:00Z","timestamp":1771372800000},"content-version":"vor","delay-in-days":0,"URL":"http:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"DOI":"10.13039\/501100012632","name":"Nazarbayev University","doi-asserted-by":"publisher","award":["20122022FD4109"],"award-info":[{"award-number":["20122022FD4109"]}],"id":[{"id":"10.13039\/501100012632","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004561","name":"Ministry of Education and Science of the Republic of Kazakhstan","doi-asserted-by":"publisher","award":["AP23487613"],"award-info":[{"award-number":["AP23487613"]}],"id":[{"id":"10.13039\/501100004561","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004561","name":"Ministry of Education and Science of the Republic of Kazakhstan","doi-asserted-by":"publisher","award":["2024\\u20132026"],"award-info":[{"award-number":["2024\\u20132026"]}],"id":[{"id":"10.13039\/501100004561","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Intelligent Systems with Applications"],"published-print":{"date-parts":[[2026,5]]},"DOI":"10.1016\/j.iswa.2026.200642","type":"journal-article","created":{"date-parts":[[2026,2,23]],"date-time":"2026-02-23T17:03:51Z","timestamp":1771866231000},"page":"200642","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["State-of-the-art Multimodal Emotion Recognition: A comprehensive survey and taxonomy"],"prefix":"10.1016","volume":"30","author":[{"given":"Adnan","family":"Yazici","sequence":"first","affiliation":[]},{"given":"Tayfun","family":"Kucukyilmaz","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1665-5928","authenticated-orcid":false,"given":"Tansel","family":"Dokeroglu","sequence":"additional","affiliation":[]},{"given":"Aidana","family":"Sharipbay","sequence":"additional","affiliation":[]},{"given":"Min-Ho","family":"Lee","sequence":"additional","affiliation":[]},{"given":"Benjamin","family":"Tyler","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.iswa.2026.200642_b1","series-title":"Open-domain, content-based, multi-modal fact-checking of out-of-context images via online resources","author":"Abdelnabi","year":"2022"},{"issue":"01","key":"10.1016\/j.iswa.2026.200642_b2","doi-asserted-by":"crossref","first-page":"73","DOI":"10.38094\/jastt20291","article-title":"Multimodal emotion recognition using deep learning","volume":"2","author":"Abdullah","year":"2021","journal-title":"Journal of Applied Science and Technology 
Trends"},{"issue":"11","key":"10.1016\/j.iswa.2026.200642_b3","doi-asserted-by":"crossref","first-page":"688","DOI":"10.3390\/bioengineering9110688","article-title":"A survey on physiological signal-based emotion recognition","volume":"9","author":"Ahmad","year":"2022","journal-title":"Bioengineering"},{"key":"10.1016\/j.iswa.2026.200642_b4","series-title":"Proceedings of the 2023 IEEE\/ACM international conference on advances in social networks analysis and mining","first-page":"530","article-title":"Knowledge graph embedding for topical and entity classification in multi-source social network data","author":"Akinnubi","year":"2024"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b5","doi-asserted-by":"crossref","first-page":"374","DOI":"10.1109\/TAFFC.2017.2714671","article-title":"Emotions recognition using EEG signals: A survey","volume":"10","author":"Alarcao","year":"2017","journal-title":"IEEE Transactions on Affective Computing"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b6","doi-asserted-by":"crossref","first-page":"36","DOI":"10.17694\/bajece.1372107","article-title":"Multimodal emotion recognition using Bi-LG-GCN for MELD dataset","volume":"12","author":"Alsaadaw\u0131","year":"2024","journal-title":"Balkan Journal of Electrical and Computer Engineering"},{"key":"10.1016\/j.iswa.2026.200642_b7","doi-asserted-by":"crossref","unstructured":"Andalibi, N., & Buss, J. (2020). The human in emotion recognition on social media: Attitudes, outcomes, risks. In Proceedings of the 2020 CHI conference on human factors in computing systems (pp. 1\u201316).","DOI":"10.1145\/3313831.3376680"},{"issue":"23","key":"10.1016\/j.iswa.2026.200642_b8","doi-asserted-by":"crossref","DOI":"10.3390\/s19235218","article-title":"EEG-based multi-modal emotion recognition using bag of deep features: An optimal feature selection approach","volume":"19","author":"Asghar","year":"2019","journal-title":"Sensors"},{"key":"10.1016\/j.iswa.2026.200642_b9","series-title":"Proceedings of the 29th ACM SIGKDD conference on knowledge discovery and data mining","first-page":"3724","article-title":"Fusing multimodal signals on hyper-complex space for extreme abstractive text summarization (TL;DR) of scientific contents","author":"Atri","year":"2023"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b10","doi-asserted-by":"crossref","first-page":"975","DOI":"10.1007\/s00138-018-0960-9","article-title":"Audiovisual emotion recognition in wild","volume":"30","author":"Avots","year":"2019","journal-title":"Machine Vision and Applications"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b11","doi-asserted-by":"crossref","first-page":"6479","DOI":"10.1007\/s11042-022-13567-8","article-title":"A novel enhanced convolution neural network with extreme learning machine: facial emotional recognition in psychology practices","volume":"82","author":"Banskota","year":"2023","journal-title":"Multimedia Tools and Applications"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b12","doi-asserted-by":"crossref","first-page":"691","DOI":"10.1037\/a0017088","article-title":"Emotion recognition from expressions in face, voice, and body: the Multimodal Emotion Recognition Test (MERT)","volume":"9","author":"B\u00e4nziger","year":"2009","journal-title":"Emotion"},{"key":"10.1016\/j.iswa.2026.200642_b13","series-title":"International conference on machine learning","first-page":"1692","article-title":"One transformer fits all distributions in multi-modal diffusion at 
scale","author":"Bao","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b14","series-title":"2019 14th IEEE international conference on automatic face & gesture recognition","first-page":"1","article-title":"Towards a multimodal time-based empathy prediction system","author":"Barbieri","year":"2019"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b15","first-page":"1","article-title":"The theory of constructed emotion: an active inference account of interoception and categorization","volume":"12","author":"Barrett","year":"2017","journal-title":"Social Cognitive and Affective Neuroscience"},{"key":"10.1016\/j.iswa.2026.200642_b16","series-title":"2018 international joint conference on neural networks","first-page":"1","article-title":"The OMG-emotion behavior dataset","author":"Barros","year":"2018"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b17","doi-asserted-by":"crossref","first-page":"355","DOI":"10.1177\/0022022104273656","article-title":"Cross-cultural emotion recognition among Canadian ethnic groups","volume":"36","author":"Beaupr\u00e9","year":"2005","journal-title":"Journal of Cross-Cultural Psychology"},{"key":"10.1016\/j.iswa.2026.200642_b18","doi-asserted-by":"crossref","first-page":"399","DOI":"10.1007\/s00521-012-1228-3","article-title":"Audiovisual emotion recognition using ANOVA feature selection method and multi-classifier neural networks","volume":"24","author":"Bejani","year":"2014","journal-title":"Neural Computing and Applications"},{"key":"10.1016\/j.iswa.2026.200642_b19","series-title":"2020 15th IEEE international conference on automatic face and gesture recognition","first-page":"644","article-title":"How are you feeling? multimodal emotion learning for socially-assistive robot navigation","author":"Bera","year":"2020"},{"key":"10.1016\/j.iswa.2026.200642_b20","doi-asserted-by":"crossref","first-page":"635","DOI":"10.1016\/j.procs.2015.02.112","article-title":"Hybrid approach for emotion classification of audio conversation based on text and speech mining","volume":"46","author":"Bhaskar","year":"2015","journal-title":"Procedia Computer Science"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b21","doi-asserted-by":"crossref","first-page":"20696","DOI":"10.1038\/s41598-021-99998-z","article-title":"A deep learning model for classifying human facial expressions from infrared thermal images","volume":"11","author":"Bhattacharyya","year":"2021","journal-title":"Scientific Reports"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b22","first-page":"1136","article-title":"Examining the digital transformation and digital entrepreneurship: A PRISMA based systematic review","volume":"22","author":"Bhuiyan","year":"2024","journal-title":"Pakistan Journal of Life and Social Sciences"},{"key":"10.1016\/j.iswa.2026.200642_b23","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2023.107708","article-title":"Multimodal emotion recognition via convolutional neural networks: Comparison of different strategies on two multimodal datasets","volume":"130","author":"Bilotti","year":"2024","journal-title":"Engineering Applications of Artificial Intelligence"},{"key":"10.1016\/j.iswa.2026.200642_b24","doi-asserted-by":"crossref","unstructured":"Bodaghi, M., Hosseini, M., & Gottumukkala, R. (2024). A Multimodal Intermediate Fusion Network with Manifold Learning for Stress Detection. In 2024 IEEE 3rd international conference on computing and machine intelligence (pp. 
1\u20138).","DOI":"10.1109\/ICMI60790.2024.10586177"},{"key":"10.1016\/j.iswa.2026.200642_b25","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2024.126236","article-title":"MIST: Multimodal emotion recognition using DeBERTa for text, Semi-CNN for speech, ResNet-50 for facial, and 3D-CNN for motion analysis","volume":"270","author":"Boitel","year":"2025","journal-title":"Expert Systems with Applications"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b26","doi-asserted-by":"crossref","first-page":"2614","DOI":"10.1109\/TAFFC.2023.3265433","article-title":"Group synchrony for emotion recognition using physiological signals","volume":"14","author":"Bota","year":"2023","journal-title":"IEEE Transactions on Affective Computing"},{"key":"10.1016\/j.iswa.2026.200642_b27","doi-asserted-by":"crossref","first-page":"722","DOI":"10.1109\/LSP.2022.3151551","article-title":"Factors in emotion recognition with deep learning models using speech and text on multiple corpora","volume":"29","author":"Braunschweiler","year":"2022","journal-title":"IEEE Signal Processing Letters"},{"key":"10.1016\/j.iswa.2026.200642_b28","doi-asserted-by":"crossref","unstructured":"Bujnowski, P., Kuzma, B., Paziewski, B., Rutkowski, J., Marhula, J., Bordzicka, Z., & Andruszkiewicz, P. (2024). SAMSEMO: New dataset for multilingual and multimodal emotion recognition. In Proc. interspeech 2024 (pp. 2925\u20132929).","DOI":"10.21437\/Interspeech.2024-212"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b29","doi-asserted-by":"crossref","first-page":"67","DOI":"10.1109\/TAFFC.2016.2515617","article-title":"MSP-IMPROV: An acted corpus of dyadic interactions to study emotion perception","volume":"8","author":"Busso","year":"2016","journal-title":"IEEE Transactions on Affective Computing"},{"issue":"17","key":"10.1016\/j.iswa.2026.200642_b30","doi-asserted-by":"crossref","first-page":"7967","DOI":"10.3390\/app11177967","article-title":"Multi-modal emotion recognition using speech features and text-embedding","volume":"11","author":"Byun","year":"2021","journal-title":"Applied Sciences"},{"issue":"24","key":"10.1016\/j.iswa.2026.200642_b31","doi-asserted-by":"crossref","first-page":"5516","DOI":"10.3390\/s19245516","article-title":"Multimodal approach for emotion recognition based on simulated flight experiments","volume":"19","author":"C\u00e9sar Cavalcanti Roza","year":"2019","journal-title":"Sensors"},{"key":"10.1016\/j.iswa.2026.200642_b32","doi-asserted-by":"crossref","unstructured":"Chakraborty, S., Shubham, Singh, S., Kaur, M., Rakesh, N., & Gulhane, M. (2024). Emotion-Based Media Recommendation System. In 2024 IEEE 6th international conference on cybernetics, cognition and machine learning applications (pp. 
370\u2013374).","DOI":"10.1109\/ICCCMLA63077.2024.10871354"},{"key":"10.1016\/j.iswa.2026.200642_b33","series-title":"Proceedings of the 2024 conference of the North American chapter of the association for computational linguistics: human language technologies (volume 3: system demonstrations)","author":"Chang","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b34","doi-asserted-by":"crossref","first-page":"3567","DOI":"10.1109\/TMM.2023.3312917","article-title":"Semi-supervised domain adaptation for major depressive disorder detection","volume":"26","author":"Chen","year":"2024","journal-title":"IEEE Transactions on Multimedia"},{"issue":"7","key":"10.1016\/j.iswa.2026.200642_b35","first-page":"2430","article-title":"Label-less learning for emotion cognition","volume":"31","author":"Chen","year":"2019","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"10.1016\/j.iswa.2026.200642_b36","article-title":"Coupled multimodal emotional feature analysis based on broad-deep fusion networks in human\u2013robot interaction","author":"Chen","year":"2023","journal-title":"IEEE Transactions on Neural Networks and Learning Systems"},{"key":"10.1016\/j.iswa.2026.200642_b37","series-title":"Proceedings of the 28th ACM SIGKDD conference on knowledge discovery and data mining","first-page":"118","article-title":"Multi-modal siamese network for entity alignment","author":"Chen","year":"2022"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b38","doi-asserted-by":"crossref","first-page":"187","DOI":"10.1109\/TCYB.2022.3185119","article-title":"Modeling hierarchical uncertainty for multimodal emotion recognition in conversation","volume":"54","author":"Chen","year":"2022","journal-title":"IEEE Transactions on Cybernetics"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b39","doi-asserted-by":"crossref","first-page":"671","DOI":"10.1007\/s11571-022-09851-w","article-title":"A multi-stage dynamical fusion network for multimodal emotion recognition","volume":"17","author":"Chen","year":"2023","journal-title":"Cognitive Neurodynamics"},{"key":"10.1016\/j.iswa.2026.200642_b40","doi-asserted-by":"crossref","first-page":"8669","DOI":"10.1007\/s00521-020-05616-w","article-title":"HEU Emotion: a large-scale database for multimodal emotion recognition in the wild","volume":"33","author":"Chen","year":"2021","journal-title":"Neural Computing and Applications"},{"issue":"9","key":"10.1016\/j.iswa.2026.200642_b41","doi-asserted-by":"crossref","first-page":"4236","DOI":"10.3390\/app12094236","article-title":"Research on emotion recognition for online learning in a novel computing model","volume":"12","author":"Chen","year":"2022","journal-title":"Applied Sciences"},{"key":"10.1016\/j.iswa.2026.200642_b42","series-title":"Proceedings of the 29th ACM SIGKDD conference on knowledge discovery and data mining","first-page":"213","article-title":"On hierarchical disentanglement of interactive behaviors for multimodal spatiotemporal data with incompleteness","author":"Chen","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b43","doi-asserted-by":"crossref","first-page":"110805","DOI":"10.52202\/079017-3518","article-title":"Emotion-llama: Multimodal emotion recognition and reasoning with instruction tuning","volume":"37","author":"Cheng","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"10.1016\/j.iswa.2026.200642_b44","series-title":"Deep learning with 
Python","author":"Chollet","year":"2021"},{"key":"10.1016\/j.iswa.2026.200642_b45","series-title":"2017 seventh international conference on affective computing and intelligent interaction","first-page":"292","article-title":"NNIME: The NTHU-NTUA Chinese interactive multimodal emotion corpus","author":"Chou","year":"2017"},{"key":"10.1016\/j.iswa.2026.200642_b46","doi-asserted-by":"crossref","first-page":"168865","DOI":"10.1109\/ACCESS.2020.3023871","article-title":"Cross-subject multimodal emotion recognition based on hybrid fusion","volume":"8","author":"Cimtay","year":"2020","journal-title":"IEEE Access"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b47","doi-asserted-by":"crossref","first-page":"335","DOI":"10.1177\/1754073913489751","article-title":"Psychological construction in the OCC model of emotion","volume":"5","author":"Clore","year":"2013","journal-title":"Emotion Review"},{"key":"10.1016\/j.iswa.2026.200642_b48","series-title":"2020 15th IEEE international conference on automatic face and gesture recognition","first-page":"93","article-title":"End-to-end facial and physiological model for affective computing and applications","author":"Comas","year":"2020"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b49","doi-asserted-by":"crossref","first-page":"338","DOI":"10.3390\/s23010338","article-title":"Wireless sensing technology combined with facial expression to realize multimodal emotion recognition","volume":"23","author":"Dang","year":"2022","journal-title":"Sensors"},{"issue":"16","key":"10.1016\/j.iswa.2026.200642_b50","doi-asserted-by":"crossref","first-page":"4551","DOI":"10.3390\/s20164551","article-title":"CNN and LSTM-based emotion charting using physiological signals","volume":"20","author":"Dar","year":"2020","journal-title":"Sensors"},{"key":"10.1016\/j.iswa.2026.200642_b51","doi-asserted-by":"crossref","DOI":"10.1016\/j.compbiomed.2022.105327","article-title":"EEG-based emotion charting for Parkinson\u2019s disease patients using Convolutional Recurrent Neural Networks and cross dataset learning","volume":"144","author":"Dar","year":"2022","journal-title":"Computers in Biology and Medicine"},{"key":"10.1016\/j.iswa.2026.200642_b52","article-title":"Convolutional neural networks on graphs with fast localized spectral filtering","volume":"29","author":"Defferrard","year":"2016","journal-title":"Advances in Neural Information Processing Systems"},{"key":"10.1016\/j.iswa.2026.200642_b53","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2024.102711","article-title":"Multiplex graph aggregation and feature refinement for unsupervised incomplete multimodal emotion recognition","volume":"114","author":"Deng","year":"2025","journal-title":"Information Fusion"},{"key":"10.1016\/j.iswa.2026.200642_b54","doi-asserted-by":"crossref","unstructured":"Dhall, A., Goecke, R., Ghosh, S., Joshi, J., Hoey, J., & Gedeon, T. (2017). From individual to group-level emotion recognition: Emotiw 5.0. In Proceedings of the 19th ACM international conference on multimodal interaction (pp. 
524\u2013528).","DOI":"10.1145\/3136755.3143004"},{"key":"10.1016\/j.iswa.2026.200642_b55","series-title":"2021 international wireless communications and mobile computing","first-page":"681","article-title":"Emotion recognition for healthcare surveillance systems using neural networks: A survey","author":"Dhuheir","year":"2021"},{"key":"10.1016\/j.iswa.2026.200642_b56","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2023.122579","article-title":"Deep CNN with late fusion for real time multimodal emotion recognition","volume":"240","author":"Dixit","year":"2024","journal-title":"Expert Systems with Applications"},{"key":"10.1016\/j.iswa.2026.200642_b57","first-page":"1","article-title":"Deep neural network-based fusion model for emotion recognition using visual data","author":"Do","year":"2021","journal-title":"Journal of Supercomputing"},{"key":"10.1016\/j.iswa.2026.200642_b58","doi-asserted-by":"crossref","first-page":"269","DOI":"10.1016\/j.neucom.2022.04.083","article-title":"A comprehensive survey on recent metaheuristics for feature selection","volume":"494","author":"Dokeroglu","year":"2022","journal-title":"Neurocomputing"},{"key":"10.1016\/j.iswa.2026.200642_b59","series-title":"An audio-video deep and transfer learning framework for multimodal emotion recognition in the wild","author":"Dresvyanskiy","year":"2020"},{"key":"10.1016\/j.iswa.2026.200642_b60","series-title":"Proceedings of the 40th international conference on machine learning","article-title":"PaLM-E: an embodied multimodal language model","author":"Driess","year":"2023"},{"issue":"7","key":"10.1016\/j.iswa.2026.200642_b61","doi-asserted-by":"crossref","first-page":"4570","DOI":"10.1109\/TITS.2020.3007357","article-title":"A convolution bidirectional long short-term memory neural network for driver emotion recognition","volume":"22","author":"Du","year":"2020","journal-title":"IEEE Transactions on Intelligent Transportation Systems"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b62","doi-asserted-by":"crossref","first-page":"591","DOI":"10.1109\/TAFFC.2020.3023966","article-title":"An emotion recognition method for game evaluation based on electroencephalogram","volume":"14","author":"Du","year":"2020","journal-title":"IEEE Transactions on Affective Computing"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b63","doi-asserted-by":"crossref","first-page":"592","DOI":"10.3390\/s20030592","article-title":"Human emotion recognition: Review of sensors and methods","volume":"20","author":"Dzedzickis","year":"2020","journal-title":"Sensors"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b64","doi-asserted-by":"crossref","first-page":"1505","DOI":"10.1111\/bjet.12992","article-title":"Multimodal learning analytics for game-based learning","volume":"51","author":"Emerson","year":"2020","journal-title":"British Journal of Educational Technology"},{"key":"10.1016\/j.iswa.2026.200642_b65","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2023.101847","article-title":"Emotion recognition from unimodal to multimodal analysis: A review","volume":"99","author":"Ezzameli","year":"2023","journal-title":"Information Fusion"},{"key":"10.1016\/j.iswa.2026.200642_b66","series-title":"FAF: A novel multimodal emotion recognition approach integrating face, body and text","author":"Fang","year":"2022"},{"key":"10.1016\/j.iswa.2026.200642_b67","doi-asserted-by":"crossref","first-page":"7517","DOI":"10.1109\/TMM.2022.3222965","article-title":"Multi-modal cross-domain alignment network for video moment 
retrieval","volume":"25","author":"Fang","year":"2023","journal-title":"IEEE Transactions on Multimedia"},{"key":"10.1016\/j.iswa.2026.200642_b68","series-title":"Multimodal emotion recognition and sentiment analysis in multi-party conversation contexts","author":"Farhadipour","year":"2025"},{"key":"10.1016\/j.iswa.2026.200642_b69","doi-asserted-by":"crossref","first-page":"92","DOI":"10.1016\/j.specom.2020.12.001","article-title":"Fusion of deep learning features with mixture of brain emotional learning for audio-visual emotion recognition","volume":"127","author":"Farhoudi","year":"2021","journal-title":"Speech Communication"},{"key":"10.1016\/j.iswa.2026.200642_b70","series-title":"2011 third international conference on intelligent networking and collaborative systems","first-page":"68","article-title":"Endowing e-learning systems with emotion awareness","author":"Feidakis","year":"2011"},{"key":"10.1016\/j.iswa.2026.200642_b71","series-title":"Proceedings of the 29th ACM SIGKDD conference on knowledge discovery and data mining","first-page":"4035","article-title":"FedMultimodal: A benchmark for multimodal federated learning","author":"Feng","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b72","series-title":"Emowoz: A large-scale corpus and labelling scheme for emotion recognition in task-oriented dialogue systems","author":"Feng","year":"2021"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b73","doi-asserted-by":"crossref","first-page":"40","DOI":"10.3390\/info16010040","article-title":"Meaningful multimodal emotion recognition based on capsule graph transformer architecture","volume":"16","author":"Filali","year":"2025","journal-title":"Information"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b74","doi-asserted-by":"crossref","first-page":"387","DOI":"10.1007\/s11063-021-10636-1","article-title":"Meaningful learning for deep facial emotional features","volume":"54","author":"Filali","year":"2022","journal-title":"Neural Processing Letters"},{"key":"10.1016\/j.iswa.2026.200642_b75","doi-asserted-by":"crossref","unstructured":"Firdaus, M., Chauhan, H., Ekbal, A. Bhattacharyya, P. (2020). MEISD: A multimodal multi-label emotion, intensity and sentiment dialogue dataset for emotion recognition and sentiment analysis in conversations. In Proceedings of the 28th international conference on computational linguistics (pp. 
4441\u20134453).","DOI":"10.18653\/v1\/2020.coling-main.393"},{"key":"10.1016\/j.iswa.2026.200642_b76","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2024.112825","article-title":"SDR-GNN: Spectral Domain Reconstruction Graph Neural Network for incomplete multimodal learning in conversational emotion recognition","volume":"309","author":"Fu","year":"2025","journal-title":"Knowledge-Based Systems"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b77","doi-asserted-by":"crossref","first-page":"91","DOI":"10.1109\/MMUL.2022.3173430","article-title":"Context- and knowledge-aware graph convolutional network for multimodal emotion recognition","volume":"29","author":"Fu","year":"2022","journal-title":"IEEE MultiMedia"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b78","doi-asserted-by":"crossref","first-page":"91","DOI":"10.1109\/MMUL.2022.3173430","article-title":"Context-and knowledge-aware graph convolutional network for multimodal emotion recognition","volume":"29","author":"Fu","year":"2022","journal-title":"IEEE Multimedia"},{"key":"10.1016\/j.iswa.2026.200642_b79","doi-asserted-by":"crossref","DOI":"10.1016\/j.neunet.2024.106764","article-title":"HiMul-LGG: A hierarchical decision fusion-based local\u2013global graph neural network for multimodal emotion recognition in conversation","volume":"181","author":"Fu","year":"2025","journal-title":"Neural Networks"},{"key":"10.1016\/j.iswa.2026.200642_b80","series-title":"2024 46th annual international conference of the IEEE engineering in medicine and biology society","first-page":"1","article-title":"Beyond the game: Multimodal emotion recognition before, during, and after gameplay","author":"Ganiti-Roumeliotou","year":"2024"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b81","doi-asserted-by":"crossref","first-page":"375","DOI":"10.1109\/TMM.2018.2859590","article-title":"The labeled multiple canonical correlation analysis for information fusion","volume":"21","author":"Gao","year":"2018","journal-title":"IEEE Transactions on Multimedia"},{"key":"10.1016\/j.iswa.2026.200642_b82","doi-asserted-by":"crossref","unstructured":"Garcia-Hernandez, R. A., Celaya-Padilla, J. M., & Luna-Garcia, H. (2023). Multimodal Emotion Recognition Model Using AI With the Aim of Addressing Gender-Based Violence. In Proceedings of the XI latin American conference on human computer interaction (pp. 
1\u20133).","DOI":"10.1145\/3630970.3631065"},{"key":"10.1016\/j.iswa.2026.200642_b83","article-title":"Multimodal emotion recognition with deep learning: advancements, challenges, and future directions","volume":"105","author":"Geetha","year":"2024","journal-title":"Information Fusion"},{"issue":"8","key":"10.1016\/j.iswa.2026.200642_b84","doi-asserted-by":"crossref","first-page":"11239","DOI":"10.1007\/s11042-022-13557-w","article-title":"Joint modelling of audio-visual cues using attention mechanisms for emotion recognition","volume":"82","author":"Ghaleb","year":"2023","journal-title":"Multimedia Tools and Applications"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b85","first-page":"37","article-title":"Metric learning-based multimodal audio-visual emotion recognition","volume":"27","author":"Ghaleb","year":"2019","journal-title":"IEEE Multimedia"},{"key":"10.1016\/j.iswa.2026.200642_b86","series-title":"Dialoguegcn: A graph convolutional neural network for emotion recognition in conversation","author":"Ghosal","year":"2019"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b87","doi-asserted-by":"crossref","first-page":"440","DOI":"10.1109\/TAFFC.2019.2927337","article-title":"Review on psychological stress detection using biosignals","volume":"13","author":"Giannakakis","year":"2019","journal-title":"IEEE Transactions on Affective Computing"},{"key":"10.1016\/j.iswa.2026.200642_b88","article-title":"Survey on multimodal approaches to emotion recognition","volume":"556","author":"Gladys","year":"2023","journal-title":"Neurocomputing"},{"key":"10.1016\/j.iswa.2026.200642_b89","series-title":"2022 4th international conference on smart systems and inventive technology","first-page":"1625","article-title":"Ravdness, crema-d, tess based algorithm for emotion recognition using speech","author":"Gokilavani","year":"2022"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b90","doi-asserted-by":"crossref","first-page":"2014","DOI":"10.1109\/TCSS.2023.3298324","article-title":"Cross-cultural emotion recognition with EEG and eye movement signals based on multiple stacked broad learning system","volume":"11","author":"Gong","year":"2023","journal-title":"IEEE Transactions on Computational Social Systems"},{"key":"10.1016\/j.iswa.2026.200642_b91","series-title":"Deep learning","author":"Goodfellow","year":"2016"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b92","doi-asserted-by":"crossref","first-page":"236","DOI":"10.1109\/TASSP.1984.1164317","article-title":"Signal estimation from modified short-time Fourier transform","volume":"32","author":"Griffin","year":"1984","journal-title":"IEEE Transactions on Acoustics, Speech and Signal Processing"},{"key":"10.1016\/j.iswa.2026.200642_b93","series-title":"2021 international conference on culture-oriented science & technology","first-page":"77","article-title":"Multimodal emotion recognition in deep learning: a survey","author":"Gu","year":"2021"},{"key":"10.1016\/j.iswa.2026.200642_b94","article-title":"Multimodal emotion recognition: Emotion classification through the integration of EEG and facial expressions","author":"G\u00fcler","year":"2025","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b95","series-title":"Emotion recognition based on multi-modal electrophysiology multi-head attention Contrastive Learning","author":"Guo","year":"2023"},{"issue":"7","key":"10.1016\/j.iswa.2026.200642_b96","doi-asserted-by":"crossref","first-page":"10099","DOI":"10.1007\/s11042-022-13360-7","article-title":"Learning inter-class optical 
flow difference using generative adversarial networks for facial expression recognition","volume":"82","author":"Guo","year":"2023","journal-title":"Multimedia Tools and Applications"},{"key":"10.1016\/j.iswa.2026.200642_b97","first-page":"2","article-title":"AI-driven emotional recognition in digital ads: A novel approach to consumer engagement","volume":"131","author":"Gupta","year":"2023","journal-title":"Journal of Marketing & Supply Chain Management"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b98","doi-asserted-by":"crossref","first-page":"1049","DOI":"10.1007\/s11760-020-01830-0","article-title":"Video-based person-dependent and person-independent facial emotion recognition","volume":"15","author":"Hajarolasvadi","year":"2021","journal-title":"Signal, Image and Video Processing"},{"key":"10.1016\/j.iswa.2026.200642_b99","doi-asserted-by":"crossref","first-page":"218499","DOI":"10.1109\/ACCESS.2020.3042328","article-title":"Generative adversarial networks in human emotion synthesis: A review","volume":"8","author":"Hajarolasvadi","year":"2020","journal-title":"IEEE Access"},{"issue":"13","key":"10.1016\/j.iswa.2026.200642_b100","doi-asserted-by":"crossref","first-page":"2933","DOI":"10.3390\/electronics12132933","article-title":"Physiological signal-based real-time emotion recognition based on exploiting mutual information with physiologically common features","volume":"12","author":"Han","year":"2023","journal-title":"Electronics"},{"key":"10.1016\/j.iswa.2026.200642_b101","doi-asserted-by":"crossref","first-page":"42","DOI":"10.1016\/j.neucom.2020.01.048","article-title":"Visual-audio emotion recognition based on multi-task and ensemble learning with multiple features","volume":"391","author":"Hao","year":"2020","journal-title":"Neurocomputing"},{"key":"10.1016\/j.iswa.2026.200642_b102","series-title":"UR-FUNNY: A multimodal language dataset for understanding humor","author":"Hasan","year":"2019"},{"key":"10.1016\/j.iswa.2026.200642_b103","doi-asserted-by":"crossref","first-page":"10","DOI":"10.1016\/j.inffus.2018.10.009","article-title":"Human emotion recognition using deep belief network architecture","volume":"51","author":"Hassan","year":"2019","journal-title":"Information Fusion"},{"key":"10.1016\/j.iswa.2026.200642_b104","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2024.108339","article-title":"Using transformers for multimodal emotion recognition: Taxonomies and state of the art review","volume":"133","author":"Hazmoune","year":"2024","journal-title":"Engineering Applications of Artificial Intelligence"},{"issue":"10","key":"10.1016\/j.iswa.2026.200642_b105","doi-asserted-by":"crossref","first-page":"687","DOI":"10.3390\/brainsci10100687","article-title":"Advances in multimodal emotion recognition based on brain\u2013computer interfaces","volume":"10","author":"He","year":"2020","journal-title":"Brain Sciences"},{"key":"10.1016\/j.iswa.2026.200642_b106","first-page":"1","article-title":"Facial expression and action unit recognition augmented by their dependencies on graph convolutional networks","author":"He","year":"2021","journal-title":"Journal on Multimodal User Interfaces"},{"key":"10.1016\/j.iswa.2026.200642_b107","doi-asserted-by":"crossref","first-page":"20727","DOI":"10.1109\/ACCESS.2022.3149214","article-title":"Adaptive multimodal emotion detection architecture for social robots","volume":"10","author":"Heredia","year":"2022","journal-title":"IEEE 
Access"},{"key":"10.1016\/j.iswa.2026.200642_b108","doi-asserted-by":"crossref","first-page":"61672","DOI":"10.1109\/ACCESS.2020.2984368","article-title":"Multimodal approach of speech emotion recognition using multi-level multi-head fusion attention-based recurrent neural network","volume":"8","author":"Ho","year":"2020","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b109","series-title":"Central European conference on information and intelligent systems","first-page":"3","article-title":"A brief overview of affective multimedia databases","author":"Horvat","year":"2017"},{"issue":"9","key":"10.1016\/j.iswa.2026.200642_b110","doi-asserted-by":"crossref","first-page":"5318","DOI":"10.1109\/TCSVT.2023.3247822","article-title":"Semantic alignment network for multi-modal emotion recognition","volume":"33","author":"Hou","year":"2023","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"issue":"15","key":"10.1016\/j.iswa.2026.200642_b111","doi-asserted-by":"crossref","first-page":"12527","DOI":"10.1007\/s00521-022-07292-4","article-title":"Human emotion recognition from EEG-based brain\u2013computer interface using machine learning: a comprehensive review","volume":"34","author":"Houssein","year":"2022","journal-title":"Neural Computing and Applications"},{"key":"10.1016\/j.iswa.2026.200642_b112","series-title":"MMGCN: Multimodal fusion via deep graph convolution network for emotion recognition in conversation","author":"Hu","year":"2021"},{"key":"10.1016\/j.iswa.2026.200642_b113","doi-asserted-by":"crossref","first-page":"26","DOI":"10.1016\/j.knosys.2019.01.019","article-title":"Image\u2013text sentiment analysis via deep multimodal attentive fusion","volume":"167","author":"Huang","year":"2019","journal-title":"Knowledge-Based Systems"},{"key":"10.1016\/j.iswa.2026.200642_b114","first-page":"1","article-title":"Multimodality in online education: a comparative study","author":"Immadisetty","year":"2025","journal-title":"Multimedia Tools and Applications"},{"key":"10.1016\/j.iswa.2026.200642_b115","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2024.106241","article-title":"Enhanced multimodal emotion recognition in healthcare analytics: A deep learning based model-level fusion approach","volume":"94","author":"Islam","year":"2024","journal-title":"Biomedical Signal Processing and Control"},{"key":"10.1016\/j.iswa.2026.200642_b116","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2020.101894","article-title":"Speech emotion recognition with deep convolutional neural networks","volume":"59","author":"Issa","year":"2020","journal-title":"Biomedical Signal Processing and Control"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b117","doi-asserted-by":"crossref","first-page":"363","DOI":"10.1177\/1754073910374661","article-title":"The many meanings\/aspects of emotion: Definitions, functions, activation, and regulation","volume":"2","author":"Izard","year":"2010","journal-title":"Emotion Review"},{"key":"10.1016\/j.iswa.2026.200642_b118","series-title":"Speech emotion recognition using support vector machine","author":"Jain","year":"2020"},{"key":"10.1016\/j.iswa.2026.200642_b119","series-title":"2022 4th international conference on advances in computing, communication control and networking","first-page":"903","article-title":"Multimodal emotion recognition using deep learning techniques","author":"James","year":"2022"},{"key":"10.1016\/j.iswa.2026.200642_b120","series-title":"RecSys posters","first-page":"11","article-title":"Music 
emotion recognition via end-to-end multimodal neural networks","author":"Jeon","year":"2017"},{"key":"10.1016\/j.iswa.2026.200642_b121","doi-asserted-by":"crossref","unstructured":"Jia, Z., Lin, Y., Wang, J., Feng, Z., Xie, X., & Chen, C. (2021). HetEmotionNet: two-stream heterogeneous graph recurrent neural network for multi-modal emotion recognition. In Proceedings of the 29th ACM international conference on multimedia (pp. 1047\u20131056).","DOI":"10.1145\/3474085.3475583"},{"key":"10.1016\/j.iswa.2026.200642_b122","series-title":"Bridging discrete and continuous: A multimodal strategy for complex emotion detection","author":"Jia","year":"2024"},{"issue":"22","key":"10.1016\/j.iswa.2026.200642_b123","doi-asserted-by":"crossref","first-page":"32265","DOI":"10.1007\/s11042-022-13091-9","article-title":"A multimodal emotion recognition model integrating speech, video and MoCAP","volume":"81","author":"Jia","year":"2022","journal-title":"Multimedia Tools and Applications"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b124","doi-asserted-by":"crossref","first-page":"1082","DOI":"10.1007\/s12559-023-10119-6","article-title":"CSAT-FTCN: a fuzzy-oriented model with contextual self-attention network for multimodal emotion recognition","volume":"15","author":"Jiang","year":"2023","journal-title":"Cognitive Computation"},{"key":"10.1016\/j.iswa.2026.200642_b125","doi-asserted-by":"crossref","unstructured":"Jin, M., & Li, J. (2023). Graph to grid: Learning deep representations for multimodal emotion recognition. In Proceedings of the 31st ACM international conference on multimedia (pp. 5985\u20135993).","DOI":"10.1145\/3581783.3612074"},{"key":"10.1016\/j.iswa.2026.200642_b126","series-title":"EvoFA: Evolvable fast adaptation for EEG emotion recognition","author":"Jin","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b127","doi-asserted-by":"crossref","first-page":"1","DOI":"10.2352\/EI.2022.34.15.COLOR-261","article-title":"Image segmentation for content-color-dependent screening (CCDS) using U-net","volume":"34","author":"Jumabayeva","year":"2022","journal-title":"Electronic Imaging"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b128","first-page":"96","article-title":"Utilizing deep learning towards multi-modal bio-sensing and vision-based affective computing","volume":"13","author":"Jung","year":"2019","journal-title":"IEEE Transactions on Affective Computing"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b129","first-page":"17","article-title":"A systematic review on emotion recognition system using physiological signals: Data acquisition and methodology","volume":"6","author":"K.","year":"2022","journal-title":"Emotion Science Journal"},{"key":"10.1016\/j.iswa.2026.200642_b130","doi-asserted-by":"crossref","unstructured":"Kahou, S. E., Pal, C., Bouthillier, X., Froumenty, P., G\u00fcl\u00e7ehre, \u00c7., Memisevic, R., Vincent, P., Courville, A., Bengio, Y., Ferrari, R. C., et al. (2013). Combining modality specific deep neural networks for emotion recognition in video. In Proceedings of the 15th ACM on international conference on multimodal interaction (pp. 
543\u2013550).","DOI":"10.1145\/2522848.2531745"},{"key":"10.1016\/j.iswa.2026.200642_b131","doi-asserted-by":"crossref","DOI":"10.1109\/ACCESS.2024.3430850","article-title":"A systematic review on multimodal emotion recognition: building blocks, current state, applications, and challenges","author":"Kalateh","year":"2024","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b132","doi-asserted-by":"crossref","first-page":"46","DOI":"10.1016\/j.inffus.2018.09.001","article-title":"Deep learning analysis of mobile physiological, environmental and location sensor data for emotion detection","volume":"49","author":"Kanjo","year":"2019","journal-title":"Information Fusion"},{"key":"10.1016\/j.iswa.2026.200642_b133","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1109\/TIM.2023.3347790","article-title":"An EEG-based computational model for decoding emotional intelligence, personality, and emotions","volume":"73","author":"Kannadasan","year":"2023","journal-title":"IEEE Transactions on Instrumentation and Measurement"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b134","doi-asserted-by":"crossref","first-page":"756","DOI":"10.1109\/TAFFC.2019.2961089","article-title":"An active learning paradigm for online audio-visual emotion recognition","volume":"13","author":"Kansizoglou","year":"2019","journal-title":"IEEE Transactions on Affective Computing"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b135","doi-asserted-by":"crossref","first-page":"98","DOI":"10.1109\/JBHI.2017.2688239","article-title":"DREAMER: A database for emotion recognition through EEG and ECG signals from wireless low-cost off-the-shelf devices","volume":"22","author":"Katsigiannis","year":"2017","journal-title":"IEEE Journal of Biomedical and Health Informatics"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b136","doi-asserted-by":"crossref","DOI":"10.1111\/exsy.13403","article-title":"Evaluating significant features in context-aware multimodal emotion recognition with XAI methods","volume":"42","author":"Khalane","year":"2025","journal-title":"Expert Systems"},{"key":"10.1016\/j.iswa.2026.200642_b137","series-title":"2017 international conference on wireless communications, signal processing and networking","first-page":"1017","article-title":"Emotion recognition using prosodie and spectral features of speech and Na\u00efve Bayes Classifier","author":"Khan","year":"2017"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b138","doi-asserted-by":"crossref","first-page":"5473","DOI":"10.1038\/s41598-025-89202-x","article-title":"MemoCMT: multimodal emotion recognition using cross-modal transformer-based feature fusion","volume":"15","author":"Khan","year":"2025","journal-title":"Scientific Reports"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b139","doi-asserted-by":"crossref","first-page":"115","DOI":"10.1007\/s00530-024-01302-2","article-title":"Exploring contactless techniques in multimodal emotion recognition: insights into diverse applications, challenges, solutions, and prospects","volume":"30","author":"Khan","year":"2024","journal-title":"Multimedia Systems"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b140","doi-asserted-by":"crossref","first-page":"478","DOI":"10.1109\/TCSS.2022.3228649","article-title":"RobinNet: A multimodal speech emotion recognition system with speaker recognition for social interactions","volume":"11","author":"Khurana","year":"2022","journal-title":"IEEE Transactions on Computational Social 
Systems"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b141","doi-asserted-by":"crossref","first-page":"223","DOI":"10.1109\/TAFFC.2017.2695999","article-title":"Multi-objective based spatio-temporal feature representation learning robust to expression intensity variations for facial expression recognition","volume":"10","author":"Kim","year":"2017","journal-title":"IEEE Transactions on Affective Computing"},{"key":"10.1016\/j.iswa.2026.200642_b142","doi-asserted-by":"crossref","unstructured":"Kim, K., & Cho, N. (2023). Focus-attention-enhanced crossmodal transformer with metric learning for multimodal speech emotion recognition. In 24th annual conference of the international speech communication association, interspeech (pp. 2673\u20132677).","DOI":"10.21437\/Interspeech.2023-555"},{"issue":"15","key":"10.1016\/j.iswa.2026.200642_b143","doi-asserted-by":"crossref","first-page":"5753","DOI":"10.3390\/s22155753","article-title":"SMaTE: A segment-level feature mixing and temporal encoding framework for facial expression recognition","volume":"22","author":"Kim","year":"2022","journal-title":"Sensors"},{"key":"10.1016\/j.iswa.2026.200642_b144","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2024.123723","article-title":"Enhancing emotion recognition using multimodal fusion of physiological, environmental, personal data","volume":"249","author":"Kim","year":"2024","journal-title":"Expert Systems with Applications"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b145","doi-asserted-by":"crossref","first-page":"45","DOI":"10.1007\/s10772-020-09672-4","article-title":"Feature extraction algorithms to improve the speech emotion recognition rate","volume":"23","author":"Koduru","year":"2020","journal-title":"International Journal of Speech Technology"},{"key":"10.1016\/j.iswa.2026.200642_b146","series-title":"Grounding language models to images for multimodal inputs and outputs","author":"Koh","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b147","series-title":"Expression, affect, action unit recognition: Aff-wild2, multi-task learning and arcface","author":"Kollias","year":"2019"},{"issue":"17","key":"10.1016\/j.iswa.2026.200642_b148","doi-asserted-by":"crossref","first-page":"7962","DOI":"10.3390\/app11177962","article-title":"Deep multimodal emotion recognition on human speech: A review","volume":"11","author":"Koromilas","year":"2021","journal-title":"Applied Sciences"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b149","doi-asserted-by":"crossref","first-page":"1022","DOI":"10.1109\/TPAMI.2019.2944808","article-title":"Sewa db: A rich database for audio-visual emotion and sentiment research in the wild","volume":"43","author":"Kossaifi","year":"2019","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.iswa.2026.200642_b150","article-title":"Multimodal emotion recognition using feature fusion: An llm-based approach","author":"Kumar","year":"2024","journal-title":"IEEE Access"},{"issue":"16","key":"10.1016\/j.iswa.2026.200642_b151","doi-asserted-by":"crossref","first-page":"24369","DOI":"10.1007\/s11042-023-14753-y","article-title":"A comparative study on facial expression recognition using local binary patterns, convolutional neural network and frequency neural network","volume":"82","author":"Kumar","year":"2023","journal-title":"Multimedia Tools and Applications"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b152","doi-asserted-by":"crossref","first-page":"274","DOI":"10.1038\/s41597-022-01402-6","article-title":"BIRAFFE2, a 
multimodal dataset for emotion-based personalization in rich affective game environments","volume":"9","author":"Kutt","year":"2022","journal-title":"Scientific Data"},{"key":"10.1016\/j.iswa.2026.200642_b153","first-page":"1","article-title":"Recognition of emotions in speech using deep CNN and RESNET","author":"Lakshmi","year":"2023","journal-title":"Soft Computing"},{"key":"10.1016\/j.iswa.2026.200642_b154","series-title":"2020 international joint conference on neural networks","first-page":"1","article-title":"Multimodal emotion recognition using deep generalized canonical correlation analysis with an attention mechanism","author":"Lan","year":"2020"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b155","first-page":"68","article-title":"Speech emotion recognition: a review","volume":"2","author":"Lanjewar","year":"2013","journal-title":"International Journal of Innovative Technology and Exploring Engineering (IJITEE)"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b156","doi-asserted-by":"crossref","first-page":"3","DOI":"10.1177\/1754073919897295","article-title":"Cross-cultural emotion recognition and in-group advantage in vocal expression: A meta-analysis","volume":"13","author":"Laukka","year":"2021","journal-title":"Emotion Review"},{"key":"10.1016\/j.iswa.2026.200642_b157","doi-asserted-by":"crossref","first-page":"14742","DOI":"10.1109\/ACCESS.2023.3244390","article-title":"Multi-label multimodal emotion recognition with transformer-based fusion and emotion-level representation learning","volume":"11","author":"Le","year":"2023","journal-title":"IEEE Access"},{"issue":"7553","key":"10.1016\/j.iswa.2026.200642_b158","doi-asserted-by":"crossref","first-page":"436","DOI":"10.1038\/nature14539","article-title":"Deep learning","volume":"521","author":"LeCun","year":"2015","journal-title":"Nature"},{"key":"10.1016\/j.iswa.2026.200642_b159","doi-asserted-by":"crossref","first-page":"94557","DOI":"10.1109\/ACCESS.2021.3092735","article-title":"Multimodal emotion recognition fusion analysis adapting BERT with heterogeneous feature unification","volume":"9","author":"Lee","year":"2021","journal-title":"IEEE Access"},{"issue":"9","key":"10.1016\/j.iswa.2026.200642_b160","doi-asserted-by":"crossref","first-page":"1162","DOI":"10.1016\/j.specom.2011.06.004","article-title":"Emotion recognition using a hierarchical binary decision tree approach","volume":"53","author":"Lee","year":"2011","journal-title":"Speech Communication"},{"key":"10.1016\/j.iswa.2026.200642_b161","series-title":"Healthcare","first-page":"322","article-title":"Emotion detection based on pupil variation","volume":"Vol. 
11","author":"Lee","year":"2023"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b162","doi-asserted-by":"crossref","first-page":"1026","DOI":"10.1038\/s41597-024-03838-4","article-title":"EAV: EEG-audio-video dataset for emotion recognition in conversational contexts","volume":"11","author":"Lee","year":"2024","journal-title":"Scientific Data"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b163","doi-asserted-by":"crossref","DOI":"10.1016\/j.ipm.2019.102185","article-title":"Exploring temporal representations by leveraging attention-based bidirectional LSTM-RNNs for multi-modal emotion recognition","volume":"57","author":"Li","year":"2020","journal-title":"Information Processing & Management"},{"key":"10.1016\/j.iswa.2026.200642_b164","series-title":"TF-mamba: Text-enhanced fusion mamba with missing modalities for robust multimodal sentiment analysis","author":"Li","year":"2025"},{"key":"10.1016\/j.iswa.2026.200642_b165","doi-asserted-by":"crossref","unstructured":"Li, B., Fei, H., Liao, L., Zhao, Y., Teng, C., Chua, T.-S., Ji, D., & Li, F. (2023). Revisiting disentanglement and fusion on modality and context in conversational multimodal emotion recognition. In Proceedings of the 31st ACM international conference on multimedia (pp. 5923\u20135934).","DOI":"10.1145\/3581783.3612053"},{"key":"10.1016\/j.iswa.2026.200642_b166","series-title":"Proceedings of the 31st ACM international conference on multimedia","first-page":"1314","article-title":"IGG: Improved graph generation for domain adaptive object detection","author":"Li","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b167","series-title":"2018 first Asian conference on affective computing and intelligent interaction","first-page":"1","article-title":"Mec 2017: Multimodal emotion recognition challenge","author":"Li","year":"2018"},{"key":"10.1016\/j.iswa.2026.200642_b168","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2023.126427","article-title":"GraphMFT: A graph network based multimodal fusion technique for emotion recognition in conversation","volume":"550","author":"Li","year":"2023","journal-title":"Neurocomputing"},{"key":"10.1016\/j.iswa.2026.200642_b169","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2024.125822","article-title":"A twin disentanglement Transformer Network with Hierarchical-Level Feature Reconstruction for robust multimodal emotion recognition","volume":"264","author":"Li","year":"2025","journal-title":"Expert Systems with Applications"},{"key":"10.1016\/j.iswa.2026.200642_b170","first-page":"28515","article-title":"Toward robust incomplete multimodal sentiment analysis via hierarchical representation learning","volume":"37","author":"Li","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"10.1016\/j.iswa.2026.200642_b171","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2024.129306","article-title":"Global distilling framework with cognitive gravitation for multimodal emotion recognition","volume":"622","author":"Li","year":"2025","journal-title":"Neurocomputing"},{"key":"10.1016\/j.iswa.2026.200642_b172","series-title":"AffectGPT: A new dataset, model, and benchmark for emotion understanding with multimodal large language models","author":"Lian","year":"2025"},{"key":"10.1016\/j.iswa.2026.200642_b173","doi-asserted-by":"crossref","first-page":"96","DOI":"10.1007\/s11633-019-1176-9","article-title":"Expression analysis based on face regions in real-world conditions","volume":"17","author":"Lian","year":"2020","journal-title":"International Journal 
of Automation and Computing"},{"key":"10.1016\/j.iswa.2026.200642_b174","series-title":"MER 2025: When affective computing meets large language models","author":"Lian","year":"2025"},{"issue":"10","key":"10.1016\/j.iswa.2026.200642_b175","doi-asserted-by":"crossref","first-page":"1440","DOI":"10.3390\/e25101440","article-title":"A survey of deep learning-based multimodal emotion recognition: Speech, text, and face","volume":"25","author":"Lian","year":"2023","journal-title":"Entropy"},{"key":"10.1016\/j.iswa.2026.200642_b176","series-title":"Merbench: A unified evaluation benchmark for multimodal emotion recognition","author":"Lian","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b177","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2024.102367","article-title":"Gpt-4v with emotion: A zero-shot benchmark for generalized emotion recognition","volume":"108","author":"Lian","year":"2024","journal-title":"Information Fusion"},{"key":"10.1016\/j.iswa.2026.200642_b178","series-title":"ICASSP 2019-2019 IEEE international conference on acoustics, speech and signal processing","first-page":"4000","article-title":"Cross-culture multimodal emotion recognition with adversarial learning","author":"Liang","year":"2019"},{"key":"10.1016\/j.iswa.2026.200642_b179","unstructured":"Liang, P. P., Salakhutdinov, R., & Morency, L.-P. (2018). Computational modeling of human multimodal language: The mosei dataset and interpretable dynamic fusion. Vol. 1, In First workshop and grand challenge on computational modeling of human multimodal language (p. 3)."},{"key":"10.1016\/j.iswa.2026.200642_b180","doi-asserted-by":"crossref","first-page":"825","DOI":"10.1613\/jair.1.15301","article-title":"Multi-modal attentive prompt learning for few-shot emotion recognition in conversations","volume":"79","author":"Liang","year":"2024","journal-title":"Journal of Artificial Intelligence Research"},{"key":"10.1016\/j.iswa.2026.200642_b181","series-title":"Applying machine learning techniques to bioinformatics: Few-shot and zero-shot methods: Few-shot and zero-shot methods","author":"Lilhore","year":"2024"},{"issue":"8","key":"10.1016\/j.iswa.2026.200642_b182","doi-asserted-by":"crossref","first-page":"2384","DOI":"10.3390\/s20082384","article-title":"Emotion recognition using eye-tracking: taxonomy, review and current challenges","volume":"20","author":"Lim","year":"2020","journal-title":"Sensors"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b183","first-page":"4447","article-title":"Dual contrastive prediction for incomplete multi-view representation learning","volume":"45","author":"Lin","year":"2023","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"10.1016\/j.iswa.2026.200642_b184","doi-asserted-by":"crossref","first-page":"1686","DOI":"10.1162\/tacl_a_00628","article-title":"Missmodal: Increasing robustness to missing modality in multimodal sentiment analysis","volume":"11","author":"Lin","year":"2023","journal-title":"Transactions of the Association for Computational Linguistics"},{"key":"10.1016\/j.iswa.2026.200642_b185","series-title":"Proceedings of the 28th ACM SIGKDD conference on knowledge discovery and data mining","first-page":"3299","article-title":"Duplex conversation: Towards human-like interaction in spoken dialogue systems","author":"Lin","year":"2022"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b186","doi-asserted-by":"crossref","first-page":"22","DOI":"10.1007\/s10723-021-09564-0","article-title":"Speech expression multimodal emotion recognition based 
on deep belief network","volume":"19","author":"Liu","year":"2021","journal-title":"Journal of Grid Computing"},{"key":"10.1016\/j.iswa.2026.200642_b187","doi-asserted-by":"crossref","first-page":"2193","DOI":"10.1109\/TASLP.2023.3282092","article-title":"Dual-tbnet: Improving the robustness of speech features via dual-transformer-bilstm for speech emotion recognition","volume":"31","author":"Liu","year":"2023","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"key":"10.1016\/j.iswa.2026.200642_b188","article-title":"EEG-based multimodal emotion recognition: A machine learning perspective","author":"Liu","year":"2024","journal-title":"IEEE Transactions on Instrumentation and Measurement"},{"key":"10.1016\/j.iswa.2026.200642_b189","series-title":"CCF international conference on natural language processing and Chinese computing","first-page":"389","article-title":"DiGTF: A difference-guided two-stage fusion framework for multimodal sentiment analysis","author":"Liu","year":"2025"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b190","doi-asserted-by":"crossref","first-page":"715","DOI":"10.1109\/TCDS.2021.3071170","article-title":"Comparing recognition performance and robustness of multimodal deep learning models for multimodal emotion recognition","volume":"14","author":"Liu","year":"2021","journal-title":"IEEE Transactions on Cognitive and Developmental Systems"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b191","doi-asserted-by":"crossref","DOI":"10.1155\/2023\/9645611","article-title":"Multimodal emotion recognition based on cascaded multichannel and hierarchical fusion","volume":"2023","author":"Liu","year":"2023","journal-title":"Computational Intelligence and Neuroscience"},{"key":"10.1016\/j.iswa.2026.200642_b192","article-title":"Judging the emotional states of customer service staff in the workplace: a multimodal dataset analysis","volume":"13","author":"Liu","year":"2022","journal-title":"Frontiers in Psychology"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b193","doi-asserted-by":"crossref","DOI":"10.1371\/journal.pone.0196391","article-title":"The Ryerson Audio-Visual Database of Emotional Speech and Song (RAVDESS): A dynamic, multimodal set of facial and vocal expressions in North American English","volume":"13","author":"Livingstone","year":"2018","journal-title":"PloS One"},{"key":"10.1016\/j.iswa.2026.200642_b194","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2024.106620","article-title":"CMLP-Net: A convolution-multilayer perceptron network for EEG-based emotion recognition","volume":"96","author":"Lu","year":"2024","journal-title":"Biomedical Signal Processing and Control"},{"key":"10.1016\/j.iswa.2026.200642_b195","series-title":"Multi-grained multimodal interaction network for entity linking","first-page":"1583","author":"Luo","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b196","doi-asserted-by":"crossref","unstructured":"Ly, S. T., Do, N.-T., Lee, G., Kim, S.-H., & Yang, H.-J. (2019). Multimodal 2D and 3D for In-The-Wild Facial Expression Recognition. In Cvpr workshops (pp. 
2927\u20132934).","DOI":"10.1109\/CVPRW.2019.00353"},{"key":"10.1016\/j.iswa.2026.200642_b197","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2025.110004","article-title":"Multimodal emotion recognition by fusing complementary patterns from central to peripheral neurophysiological signals across feature domains","volume":"143","author":"Ma","year":"2025","journal-title":"Engineering Applications of Artificial Intelligence"},{"key":"10.1016\/j.iswa.2026.200642_b198","article-title":"A transformer-based model with self-distillation for multimodal emotion recognition in conversations","author":"Ma","year":"2023","journal-title":"IEEE Transactions on Multimedia"},{"key":"10.1016\/j.iswa.2026.200642_b199","series-title":"ICASSP 2023-2023 IEEE international conference on acoustics, speech and signal processing","first-page":"1","article-title":"Multimodal emotion recognition based on deep temporal features using cross-modal transformer and self-attention","author":"Maji","year":"2023"},{"issue":"10","key":"10.1016\/j.iswa.2026.200642_b200","doi-asserted-by":"crossref","first-page":"4199","DOI":"10.3390\/app14104199","article-title":"Enhancing multimodal emotion recognition through attention mechanisms in BERT and CNN architectures","volume":"14","author":"Makhmudov","year":"2024","journal-title":"Applied Sciences"},{"key":"10.1016\/j.iswa.2026.200642_b201","article-title":"Learning graph embeddings for open world compositional zero-shot learning","author":"Mancini","year":"2022","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"issue":"18","key":"10.1016\/j.iswa.2026.200642_b202","doi-asserted-by":"crossref","first-page":"5163","DOI":"10.3390\/s20185163","article-title":"Emotion recognition in immersive virtual reality: From statistics to affective computing","volume":"20","author":"Mar\u00edn-Morales","year":"2020","journal-title":"Sensors"},{"issue":"25","key":"10.1016\/j.iswa.2026.200642_b203","doi-asserted-by":"crossref","first-page":"38667","DOI":"10.1007\/s11042-023-15118-1","article-title":"Deep fusion framework for speech command recognition using acoustic and linguistic features","volume":"82","author":"Mehra","year":"2023","journal-title":"Multimedia Tools and Applications"},{"key":"10.1016\/j.iswa.2026.200642_b204","doi-asserted-by":"crossref","first-page":"261","DOI":"10.1007\/BF02686918","article-title":"Pleasure-arousal-dominance: A general framework for describing and measuring individual differences in temperament","volume":"14","author":"Mehrabian","year":"1996","journal-title":"Current Psychology"},{"key":"10.1016\/j.iswa.2026.200642_b205","doi-asserted-by":"crossref","DOI":"10.1109\/TAI.2024.3445325","article-title":"Deep imbalanced learning for multimodal emotion recognition in conversations","author":"Meng","year":"2024","journal-title":"IEEE Transactions on Artificial Intelligence"},{"key":"10.1016\/j.iswa.2026.200642_b206","doi-asserted-by":"crossref","DOI":"10.1109\/TASLP.2024.3434495","article-title":"Masked graph learning with recurrent alignment for multimodal emotion recognition in conversation","author":"Meng","year":"2024","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"issue":"12","key":"10.1016\/j.iswa.2026.200642_b207","article-title":"CAG-MoE: Multimodal emotion recognition with cross-attention gated mixture of experts","volume":"13","author":"Mengara Mengara","year":"2025","journal-title":"Mathematics 
(2227-7390)"},{"issue":"12","key":"10.1016\/j.iswa.2026.200642_b208","doi-asserted-by":"crossref","first-page":"1437","DOI":"10.3390\/math9121437","article-title":"Multi-output learning based on multimodal GCN and co-attention for image aesthetics and emotion analysis","volume":"9","author":"Miao","year":"2021","journal-title":"Mathematics"},{"key":"10.1016\/j.iswa.2026.200642_b209","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2022.108580","article-title":"Deep learning based multimodal emotion recognition using model-level fusion of audio\u2013visual modalities","volume":"244","author":"Middya","year":"2022","journal-title":"Knowledge-Based Systems"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b210","doi-asserted-by":"crossref","first-page":"479","DOI":"10.1109\/TAFFC.2018.2884461","article-title":"Amigos: A dataset for affect, personality and mood research on individuals and groups","volume":"12","author":"Miranda-Correa","year":"2018","journal-title":"IEEE Transactions on Affective Computing"},{"key":"10.1016\/j.iswa.2026.200642_b211","doi-asserted-by":"crossref","unstructured":"Mittal, T., Bhattacharya, U., Chandra, R., Bera, A., & Manocha, D. (2020). M3er: Multiplicative multimodal emotion recognition using facial, textual, and speech cues. Vol. 34, In Proceedings of the AAAI conference on artificial intelligence (pp. 1359\u20131367).","DOI":"10.1609\/aaai.v34i02.5492"},{"key":"10.1016\/j.iswa.2026.200642_b212","series-title":"2023 international symposium on signals, circuits and systems","first-page":"1","article-title":"Facial emotion recognition using video visual transformer and attention dropping","author":"Mocanu","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b213","doi-asserted-by":"crossref","DOI":"10.1016\/j.imavis.2023.104676","article-title":"Multimodal emotion recognition using cross modal audio-video fusion with attention and deep metric learning","volume":"133","author":"Mocanu","year":"2023","journal-title":"Image and Vision Computing"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b214","doi-asserted-by":"crossref","first-page":"239","DOI":"10.1162\/coli_a_00433","article-title":"Ethics sheet for automatic emotion recognition and sentiment analysis","volume":"48","author":"Mohammad","year":"2022","journal-title":"Computational Linguistics"},{"issue":"8","key":"10.1016\/j.iswa.2026.200642_b215","doi-asserted-by":"crossref","first-page":"9320","DOI":"10.1007\/s11227-022-05026-w","article-title":"Emotion recognition framework using multiple modalities for an effective human\u2013computer interaction","volume":"79","author":"Moin","year":"2023","journal-title":"Journal of Supercomputing"},{"key":"10.1016\/j.iswa.2026.200642_b216","doi-asserted-by":"crossref","DOI":"10.1109\/ACCESS.2024.3427111","article-title":"Multimodal daily-life emotional recognition using heart rate and speech data from wearables","author":"Moon","year":"2024","journal-title":"IEEE Access"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b217","doi-asserted-by":"crossref","first-page":"2970","DOI":"10.1109\/TAFFC.2023.3250460","article-title":"Driver emotion recognition with a hybrid attentional multimodal fusion framework","volume":"14","author":"Mou","year":"2023","journal-title":"IEEE Transactions on Affective Computing"},{"key":"10.1016\/j.iswa.2026.200642_b218","doi-asserted-by":"crossref","first-page":"225463","DOI":"10.1109\/ACCESS.2020.3027026","article-title":"Automatic emotion recognition using temporal multimodal deep 
learning","volume":"8","author":"Nakisa","year":"2020","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b219","series-title":"Machine intelligence and smart systems: Proceedings of MISS 2021","first-page":"473","article-title":"Fixed-MAML for few-shot classification in multilingual speech emotion recognition","author":"Naman","year":"2022"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b220","doi-asserted-by":"crossref","first-page":"910","DOI":"10.1016\/j.bbe.2020.04.005","article-title":"Comparison of different feature extraction methods for EEG-based emotion recognition","volume":"40","author":"Nawaz","year":"2020","journal-title":"Biocybernetics and Biomedical Engineering"},{"key":"10.1016\/j.iswa.2026.200642_b221","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2025.113396","article-title":"Enhancing multimodal emotion recognition with dynamic fuzzy membership and attention fusion","volume":"165","author":"Nguyen","year":"2026","journal-title":"Engineering Applications of Artificial Intelligence"},{"key":"10.1016\/j.iswa.2026.200642_b222","doi-asserted-by":"crossref","first-page":"33","DOI":"10.1016\/j.cviu.2018.06.005","article-title":"Deep spatio-temporal feature fusion with compact bilinear pooling for multimodal emotion recognition","volume":"174","author":"Nguyen","year":"2018","journal-title":"Computer Vision and Image Understanding"},{"key":"10.1016\/j.iswa.2026.200642_b223","doi-asserted-by":"crossref","first-page":"1313","DOI":"10.1109\/TMM.2021.3063612","article-title":"Deep auto-encoders with sequential learning for multimodal dimensional emotion recognition","volume":"24","author":"Nguyen","year":"2021","journal-title":"IEEE Transactions on Multimedia"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b224","doi-asserted-by":"crossref","first-page":"62","DOI":"10.1109\/TKDE.2023.3283520","article-title":"CityTrans: Domain-adversarial training with knowledge transfer for spatio-temporal prediction across cities","volume":"36","author":"Ouyang","year":"2024","journal-title":"IEEE Transactions on Knowledge and Data Engineering"},{"key":"10.1016\/j.iswa.2026.200642_b225","doi-asserted-by":"crossref","unstructured":"P, S. K., & Ronickom, J. F. A. (2023). Enhancing Emotion Recognition: Machine Learning with Phasic Spectrogram Texture Features. In 2023 IEEE 5th international conference on cybernetics, cognition and machine learning applications (pp. 
600\u2013603).","DOI":"10.1109\/ICCCMLA58983.2023.10346619"},{"key":"10.1016\/j.iswa.2026.200642_b226","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2023.126866","article-title":"A review of multimodal emotion recognition from datasets, preprocessing, features, and fusion methods","volume":"561","author":"Pan","year":"2023","journal-title":"Neurocomputing"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b227","doi-asserted-by":"crossref","first-page":"1903","DOI":"10.1007\/s12652-021-03407-2","article-title":"Multimodal emotion recognition based on feature selection and extreme learning machine in video clips","volume":"14","author":"Pan","year":"2023","journal-title":"Journal of Ambient Intelligence and Humanized Computing"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b228","doi-asserted-by":"crossref","first-page":"1619","DOI":"10.3390\/app10051619","article-title":"EEG-based emotion recognition using logistic regression with Gaussian kernel and Laplacian prior and investigation of critical frequency bands","volume":"10","author":"Pan","year":"2020","journal-title":"Applied Sciences"},{"key":"10.1016\/j.iswa.2026.200642_b229","series-title":"Proceedings of the global ai congress 2019","first-page":"399","article-title":"Multimodal system for emotion recognition using EEG and customer review","author":"Panda","year":"2020"},{"key":"10.1016\/j.iswa.2026.200642_b230","unstructured":"Panda, R. E. S., Malheiro, R., Rocha, B., Oliveira, A. P., & Paiva, R. P. (2013). Multi-modal music emotion recognition: A new dataset, methodology and comparative analysis. In 10th international symposium on computer music multidisciplinary research (pp. 570\u2013582)."},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b231","doi-asserted-by":"crossref","first-page":"293","DOI":"10.1038\/s41597-020-00630-y","article-title":"K-EmoCon, a multimodal sensor dataset for continuous emotion recognition in naturalistic conversations","volume":"7","author":"Park","year":"2020","journal-title":"Scientific Data"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b232","doi-asserted-by":"crossref","first-page":"124","DOI":"10.62411\/faith.2024-22","article-title":"A reinforcement learning-based approach for promoting mental health using multimodal emotion recognition","volume":"1","author":"Pathirana","year":"2024","journal-title":"Journal of Future Artificial Intelligence and Technologies"},{"key":"10.1016\/j.iswa.2026.200642_b233","doi-asserted-by":"crossref","first-page":"15563","DOI":"10.1007\/s11042-020-10329-2","article-title":"Convolution neural network based automatic speech emotion recognition using Mel-frequency Cepstrum coefficients","volume":"80","author":"Pawar","year":"2021","journal-title":"Multimedia Tools and Applications"},{"key":"10.1016\/j.iswa.2026.200642_b234","doi-asserted-by":"crossref","first-page":"42","DOI":"10.1016\/j.neucom.2019.09.037","article-title":"An efficient model-level fusion approach for continuous affect recognition from audiovisual signals","volume":"376","author":"Pei","year":"2020","journal-title":"Neurocomputing"},{"key":"10.1016\/j.iswa.2026.200642_b235","doi-asserted-by":"crossref","first-page":"10218","DOI":"10.1109\/ACCESS.2023.3240420","article-title":"A framework to evaluate fusion methods for multimodal emotion recognition","volume":"11","author":"Pe\u00f1a","year":"2023","journal-title":"IEEE 
Access"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b236","doi-asserted-by":"crossref","first-page":"81","DOI":"10.1007\/s00530-024-01618-z","article-title":"Hierarchical heterogeneous graph network based multimodal emotion recognition in conversation","volume":"31","author":"Peng","year":"2025","journal-title":"Multimedia Systems"},{"issue":"11","key":"10.1016\/j.iswa.2026.200642_b237","doi-asserted-by":"crossref","first-page":"3484","DOI":"10.3390\/s24113484","article-title":"Systematic review of emotion detection with computer vision and deep learning","volume":"24","author":"Pereira","year":"2024","journal-title":"Sensors"},{"key":"10.1016\/j.iswa.2026.200642_b238","series-title":"RAMAS: Russian Multimodal Corpus of Dyadic Interaction for studying emotion recognition","author":"Perepelkina","year":"2018"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b239","doi-asserted-by":"crossref","first-page":"131","DOI":"10.1007\/s10462-025-11126-9","article-title":"A review on EEG-based multimodal learning for emotion recognition","volume":"58","author":"Pillalamarri","year":"2025","journal-title":"Artificial Intelligence Review"},{"key":"10.1016\/j.iswa.2026.200642_b240","doi-asserted-by":"crossref","unstructured":"Pini, S., Ahmed, O. B., Cornia, M., Baraldi, L., Cucchiara, R., & Huet, B. (2017). Modeling multimodal cues in a deep learning-based framework for emotion recognition in the wild. In Proceedings of the 19th ACM international conference on multimodal interaction (pp. 536\u2013543).","DOI":"10.1145\/3136755.3143006"},{"key":"10.1016\/j.iswa.2026.200642_b241","doi-asserted-by":"crossref","first-page":"523","DOI":"10.1016\/j.procs.2017.10.038","article-title":"Enhancing CNN with preprocessing stage in automatic emotion recognition","volume":"116","author":"Pitaloka","year":"2017","journal-title":"Procedia Computer Science"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b242","doi-asserted-by":"crossref","first-page":"344","DOI":"10.1511\/2001.28.344","article-title":"The nature of emotions: Human emotions have deep evolutionary roots, a fact that may explain their complexity and provide tools for clinical practice","volume":"89","author":"Plutchik","year":"2001","journal-title":"American Scientist"},{"key":"10.1016\/j.iswa.2026.200642_b243","doi-asserted-by":"crossref","first-page":"104","DOI":"10.1016\/j.neunet.2014.10.005","article-title":"Towards an intelligent framework for multimodal affective data analysis","volume":"63","author":"Poria","year":"2015","journal-title":"Neural Networks"},{"key":"10.1016\/j.iswa.2026.200642_b244","series-title":"Meld: A multimodal multi-party dataset for emotion recognition in conversations","author":"Poria","year":"2018"},{"key":"10.1016\/j.iswa.2026.200642_b245","doi-asserted-by":"crossref","first-page":"100943","DOI":"10.1109\/ACCESS.2019.2929050","article-title":"Emotion recognition in conversation: Research challenges, datasets, and recent advances","volume":"7","author":"Poria","year":"2019","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b246","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2023.104624","article-title":"Hierarchical extreme puzzle learning machine-based emotion recognition using multimodal physiological signals","volume":"83","author":"Pradhan","year":"2023","journal-title":"Biomedical Signal Processing and Control"},{"key":"10.1016\/j.iswa.2026.200642_b247","doi-asserted-by":"crossref","first-page":"32","DOI":"10.3389\/fnhum.2020.00032","article-title":"Multimodal recognition of emotions in 
music and facial expressions","volume":"14","author":"Proverbio","year":"2020","journal-title":"Frontiers in Human Neuroscience"},{"issue":"6","key":"10.1016\/j.iswa.2026.200642_b248","article-title":"Multimodal emotion recognition: A comprehensive review, trends, and challenges","volume":"14","author":"Ramaswamy","year":"2024","journal-title":"Wiley Interdisciplinary Reviews: Data Mining and Knowledge Discovery"},{"key":"10.1016\/j.iswa.2026.200642_b249","series-title":"2020 15th IEEE international conference on automatic face and gesture recognition","first-page":"629","article-title":"Multi-modal expression recognition in the wild using sequence modeling","author":"Rasipuram","year":"2020"},{"issue":"9","key":"10.1016\/j.iswa.2026.200642_b250","doi-asserted-by":"crossref","first-page":"4373","DOI":"10.3390\/s23094373","article-title":"A hybrid multimodal emotion recognition framework for UX evaluation using generalized mixture functions","volume":"23","author":"Razzaq","year":"2023","journal-title":"Sensors"},{"key":"10.1016\/j.iswa.2026.200642_b251","doi-asserted-by":"crossref","DOI":"10.1016\/j.jvcir.2023.103846","article-title":"Multi-loop graph convolutional network for multimodal conversational emotion recognition","volume":"94","author":"Ren","year":"2023","journal-title":"Journal of Visual Communication and Image Representation"},{"key":"10.1016\/j.iswa.2026.200642_b252","series-title":"2022 9th international conference on computing for sustainable global development","first-page":"74","article-title":"Using kernel shap xai method to optimize the network anomaly detection model","author":"Roshan","year":"2022"},{"key":"10.1016\/j.iswa.2026.200642_b253","series-title":"2023 IEEE international conference on acoustics, speech, and signal processing workshops","first-page":"1","article-title":"A vector quantized masked autoencoder for speech emotion recognition","author":"Sadok","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b254","doi-asserted-by":"crossref","unstructured":"Saffaryazdi, N., Goonesekera, Y., Saffaryazdi, N., Hailemariam, N. D., Temesgen, E. G., Nanayakkara, S., Broadbent, E., & Billinghurst, M. (2022). Emotion recognition in conversations using brain and physiological signals. In Proceedings of the 27th international conference on intelligent user interfaces (pp. 
229\u2013242).","DOI":"10.1145\/3490099.3511148"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b255","doi-asserted-by":"crossref","first-page":"1876","DOI":"10.1109\/TAFFC.2022.3176135","article-title":"Emotion recognition for everyday life using physiological signals from wearables: A systematic literature review","volume":"14","author":"Saganowski","year":"2022","journal-title":"IEEE Transactions on Affective Computing"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b256","doi-asserted-by":"crossref","first-page":"167","DOI":"10.1016\/j.eij.2020.07.005","article-title":"A 3D-convolutional neural network framework with ensemble learning techniques for multi-modal emotion recognition","volume":"22","author":"Salama","year":"2021","journal-title":"Egyptian Informatics Journal"},{"key":"10.1016\/j.iswa.2026.200642_b257","series-title":"Recent advances in recurrent neural networks","author":"Salehinejad","year":"2017"},{"key":"10.1016\/j.iswa.2026.200642_b258","doi-asserted-by":"crossref","DOI":"10.1016\/j.fss.2025.109419","article-title":"Multimodal multimedia information retrieval through the integration of fuzzy clustering, OWA-based fusion, and Siamese neural networks","author":"Sattari","year":"2025","journal-title":"Fuzzy Sets and Systems"},{"key":"10.1016\/j.iswa.2026.200642_b259","series-title":"Proceedings of the 40th international conference on machine learning","first-page":"30119","article-title":"Facial expression recognition with adaptive frame rate based on multiple testing correction","volume":"vol. 202","author":"Savchenko","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b260","doi-asserted-by":"crossref","unstructured":"Saxen, F., Werner, P., & Al-Hamadi, A. (2017). Real vs. fake emotion challenge: Learning to rank authenticity from facial activity descriptors. In Proceedings of the IEEE international conference on computer vision workshops (pp. 
3073\u20133078).","DOI":"10.1109\/ICCVW.2017.363"},{"key":"10.1016\/j.iswa.2026.200642_b261","doi-asserted-by":"crossref","first-page":"94281","DOI":"10.1109\/ACCESS.2023.3310428","article-title":"Multi-modal CNN features fusion for emotion recognition: A modified xception model","volume":"11","author":"Shahzad","year":"2023","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b262","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2022.103970","article-title":"A novel spatio-temporal convolutional neural framework for multimodal emotion recognition","volume":"78","author":"Sharafi","year":"2022","journal-title":"Biomedical Signal Processing and Control"},{"key":"10.1016\/j.iswa.2026.200642_b263","series-title":"Proceedings of the 25th international conference on multimodal interaction","first-page":"51","article-title":"Annotations from speech and heart rate: impact on multimodal emotion recognition","author":"Sharma","year":"2023"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b264","doi-asserted-by":"crossref","first-page":"527","DOI":"10.1108\/IJICC-07-2020-0088","article-title":"Facial expression recognition based on bidirectional gated recurrent units within deep residual network","volume":"13","author":"Shen","year":"2020","journal-title":"International Journal of Intelligent Computing and Cybernetics"},{"key":"10.1016\/j.iswa.2026.200642_b265","series-title":"PMG : Personalized multimodal generation with large language models","author":"Shen","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b266","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2021.107868","article-title":"Multi-task learning for gait-based identity recognition and emotion recognition using attention enhanced temporal graph convolutional network","volume":"114","author":"Sheng","year":"2021","journal-title":"Pattern Recognition"},{"key":"10.1016\/j.iswa.2026.200642_b267","series-title":"Multilogue-net: A context aware rnn for multi-modal emotion detection and sentiment analysis in conversation","author":"Shenoy","year":"2020"},{"key":"10.1016\/j.iswa.2026.200642_b268","series-title":"A comprehensive survey on multi-modal conversational emotion recognition with deep learning","author":"Shou","year":"2023"},{"issue":"6","key":"10.1016\/j.iswa.2026.200642_b269","doi-asserted-by":"crossref","first-page":"47","DOI":"10.3390\/mti6060047","article-title":"A survey on databases for multimodal emotion recognition and an introduction to the VIRI (visible and InfraRed image) database","volume":"6","author":"Siddiqui","year":"2022","journal-title":"Multimodal Technologies and Interaction"},{"issue":"6","key":"10.1016\/j.iswa.2026.200642_b270","doi-asserted-by":"crossref","DOI":"10.1111\/exsy.13239","article-title":"Stress recognition with multi-modal sensing using bootstrapped ensemble deep learning model","volume":"40","author":"Singh","year":"2023","journal-title":"Expert Systems"},{"key":"10.1016\/j.iswa.2026.200642_b271","doi-asserted-by":"crossref","first-page":"472","DOI":"10.33564\/IJEAST.2020.v04i12.083","article-title":"A review paper on emotion recognition","volume":"4","author":"Singh","year":"2020","journal-title":"International Journal of Engineering Applied Sciences and Technology"},{"key":"10.1016\/j.iswa.2026.200642_b272","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2021.107316","article-title":"A multimodal hierarchical approach to speech emotion recognition from audio and text","volume":"229","author":"Singh","year":"2021","journal-title":"Knowledge-Based 
Systems"},{"key":"10.1016\/j.iswa.2026.200642_b273","doi-asserted-by":"crossref","first-page":"176274","DOI":"10.1109\/ACCESS.2020.3026823","article-title":"Multimodal emotion recognition with transformer-based self supervised feature fusion","volume":"8","author":"Siriwardhana","year":"2020","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b274","doi-asserted-by":"crossref","first-page":"12177","DOI":"10.1109\/ACCESS.2019.2891579","article-title":"MPED: A multi-modal physiological emotion database for discrete emotion recognition","volume":"7","author":"Song","year":"2019","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b275","doi-asserted-by":"crossref","DOI":"10.3389\/frobt.2020.532279","article-title":"Emotion recognition for human-robot interaction: Recent advances and future perspectives","volume":"7","author":"Spezialetti","year":"2020","journal-title":"Frontiers in Robotics and AI"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b276","doi-asserted-by":"crossref","first-page":"1334","DOI":"10.1109\/TAFFC.2021.3097002","article-title":"The multimodal sentiment analysis in car reviews (muse-car) dataset: Collection, insights and improvements","volume":"14","author":"Stappen","year":"2021","journal-title":"IEEE Transactions on Affective Computing"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b277","doi-asserted-by":"crossref","first-page":"147","DOI":"10.1109\/TAFFC.2016.2625250","article-title":"ASCERTAIN: Emotion and personality recognition using commercial sensors","volume":"9","author":"Subramanian","year":"2016","journal-title":"IEEE Transactions on Affective Computing"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b278","doi-asserted-by":"crossref","first-page":"85","DOI":"10.3390\/brainsci10020085","article-title":"Multimodal affective state assessment using fNIRS+ EEG and spontaneous facial expression","volume":"10","author":"Sun","year":"2020","journal-title":"Brain Sciences"},{"key":"10.1016\/j.iswa.2026.200642_b279","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2021.103029","article-title":"A multimodal emotion recognition method based on facial expressions and electroencephalography","volume":"70","author":"Tan","year":"2021","journal-title":"Biomedical Signal Processing and Control"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b280","doi-asserted-by":"crossref","first-page":"1167","DOI":"10.28991\/ESJ-2022-06-05-017","article-title":"A systematic review on emotion recognition system using physiological signals: data acquisition and methodology","volume":"6","author":"Tawsif","year":"2022","journal-title":"Emerging Science Journal"},{"key":"10.1016\/j.iswa.2026.200642_b281","doi-asserted-by":"crossref","unstructured":"Thai Ly, S., Do, N.-T., Lee, G.-S., Kim, S.-H., & Yang, H.-J. (2019). Multimodal 2D and 3D for In-the-wild Facial Expression Recognition. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition workshops.","DOI":"10.1109\/CVPRW.2019.00353"},{"key":"10.1016\/j.iswa.2026.200642_b282","series-title":"2016 international conference on circuit, power and computing technologies","first-page":"1","article-title":"A multimodal emotion recognition system from video","author":"Thushara","year":"2016"},{"key":"10.1016\/j.iswa.2026.200642_b283","doi-asserted-by":"crossref","unstructured":"Tian, W., Huang, X., & Zou, S. (2025). Multi-Condition Guided Diffusion Network for Multimodal Emotion Recognition in Conversation. 
In Findings of the association for computational linguistics: NAACL 2025 (pp. 3215\u20133227).","DOI":"10.18653\/v1\/2025.findings-naacl.177"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b284","doi-asserted-by":"crossref","first-page":"1397","DOI":"10.1007\/s41870-023-01697-7","article-title":"Fusing facial and speech cues for enhanced multimodal emotion recognition","volume":"16","author":"Tomar","year":"2024","journal-title":"International Journal of Information Technology"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b285","article-title":"Multimodal music emotion recognition method based on the combination of knowledge distillation and transfer learning","volume":"2022","author":"Tong","year":"2022","journal-title":"Scientific Programming"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b286","doi-asserted-by":"crossref","first-page":"9","DOI":"10.1007\/s12193-016-0222-y","article-title":"SVM-based feature selection methods for emotion recognition from multimodal data","volume":"11","author":"Torres-Valencia","year":"2023","journal-title":"Journal on Multimodal User Interfaces"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b287","doi-asserted-by":"crossref","first-page":"1","DOI":"10.7763\/IJCTE.2021.V13.1282","article-title":"SVM-based face recognition through difference of Gaussians and local phase quantization","volume":"13","author":"Tran","year":"2021","journal-title":"International Journal of Computer Theory and Engineering"},{"key":"10.1016\/j.iswa.2026.200642_b288","doi-asserted-by":"crossref","unstructured":"Tsai, T.-W., Lo, H. Y., & Chen, K.-S. (2012). An affective computing approach to develop the game-based adaptive learning material for the elementary students. In Proceedings of the 2012 joint international conference on human-centered computer environments (pp. 8\u201313).","DOI":"10.1145\/2160749.2160752"},{"issue":"8","key":"10.1016\/j.iswa.2026.200642_b289","doi-asserted-by":"crossref","first-page":"1301","DOI":"10.1109\/JSTSP.2017.2764438","article-title":"End-to-end multimodal emotion recognition using deep neural networks","volume":"11","author":"Tzirakis","year":"2017","journal-title":"IEEE Journal of Selected Topics in Signal Processing"},{"key":"10.1016\/j.iswa.2026.200642_b290","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2024.106224","article-title":"Emotion fusion-sense (Emo Fu-sense)\u2013a novel multimodal emotion classification technique","volume":"94","author":"Umair","year":"2024","journal-title":"Biomedical Signal Processing and Control"},{"key":"10.1016\/j.iswa.2026.200642_b291","doi-asserted-by":"crossref","unstructured":"Vaiani, L., La Quatra, M., Cagliero, L., & Garza, P. (2022). Viper: Video-based perceiver for emotion recognition. In Proceedings of the 3rd international on multimodal sentiment analysis workshop and challenge (pp. 
67\u201373).","DOI":"10.1145\/3551876.3554806"},{"issue":"04","key":"10.1016\/j.iswa.2026.200642_b292","doi-asserted-by":"crossref","DOI":"10.1142\/S0129065720500136","article-title":"Real-time multi-modal estimation of dynamically evoked emotions using EEG, heart rate and galvanic skin response","volume":"30","author":"Val-Calvo","year":"2020","journal-title":"International Journal of Neural Systems"},{"key":"10.1016\/j.iswa.2026.200642_b293","first-page":"1","article-title":"Multimodal emotion recognition system for e-learning platform","author":"Vani","year":"2025","journal-title":"Education and Information Technologies"},{"key":"10.1016\/j.iswa.2026.200642_b294","article-title":"Attention is all you need","volume":"30","author":"Vaswani","year":"2017","journal-title":"Advances in Neural Information Processing Systems"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b295","doi-asserted-by":"crossref","first-page":"89","DOI":"10.1109\/TAFFC.2021.3065726","article-title":"Automatic emotion recognition for groups: a review","volume":"14","author":"Veltmeijer","year":"2021","journal-title":"IEEE Transactions on Affective Computing"},{"key":"10.1016\/j.iswa.2026.200642_b296","series-title":"2009 3rd international conference on affective computing and intelligent interaction and workshops","first-page":"1","article-title":"Smart sensor integration: A framework for multimodal emotion recognition in real-time","author":"Wagner","year":"2009"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b297","doi-asserted-by":"crossref","first-page":"1675","DOI":"10.1109\/TITS.2023.3314402","article-title":"Cost-sensitive graph convolutional network with self-paced learning for hit-and-run analysis","volume":"25","author":"Wan","year":"2023","journal-title":"Transactions on Intelligent Transportation Systems"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b298","doi-asserted-by":"crossref","first-page":"449","DOI":"10.1504\/IJBM.2024.140770","article-title":"Rapid recognition of athlete\u2019s anxiety emotion based on multimodal fusion","volume":"16","author":"Wang","year":"2024","journal-title":"International Journal of Biometrics"},{"key":"10.1016\/j.iswa.2026.200642_b299","series-title":"Multimodal query suggestion with multi-agent reinforcement learning from human feedback","author":"Wang","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b300","series-title":"2017 IEEE international conference on multimedia and expo","first-page":"949","article-title":"Select-additive learning: Improving generalization in multimodal sentiment analysis","author":"Wang","year":"2017"},{"key":"10.1016\/j.iswa.2026.200642_b301","doi-asserted-by":"crossref","first-page":"33061","DOI":"10.1109\/ACCESS.2023.3263670","article-title":"Multimodal emotion recognition from EEG signals and facial expressions","volume":"11","author":"Wang","year":"2023","journal-title":"IEEE Access"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b302","doi-asserted-by":"crossref","first-page":"4897","DOI":"10.1007\/s11042-021-10553-4","article-title":"Speech emotion recognition based on multi-feature and multi-lingual fusion","volume":"81","author":"Wang","year":"2022","journal-title":"Multimedia Tools and Applications"},{"issue":"35","key":"10.1016\/j.iswa.2026.200642_b303","doi-asserted-by":"crossref","first-page":"21923","DOI":"10.1007\/s00521-024-10371-3","article-title":"A review of multimodal-based emotion recognition techniques for cyberbullying detection in online social media 
platforms","volume":"36","author":"Wang","year":"2024","journal-title":"Neural Computing and Applications"},{"key":"10.1016\/j.iswa.2026.200642_b304","doi-asserted-by":"crossref","first-page":"19","DOI":"10.1016\/j.inffus.2022.03.009","article-title":"A systematic review on affective computing: Emotion models, databases, and recent advances","volume":"83","author":"Wang","year":"2022","journal-title":"Information Fusion"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b305","doi-asserted-by":"crossref","first-page":"107","DOI":"10.1007\/s12559-025-10463-9","article-title":"Contrastive-based removal of negative information in multimodal emotion analysis","volume":"17","author":"Wang","year":"2025","journal-title":"Cognitive Computation"},{"key":"10.1016\/j.iswa.2026.200642_b306","doi-asserted-by":"crossref","DOI":"10.1016\/j.compbiomed.2022.105907","article-title":"Multi-modal emotion recognition using EEG and speech signals","volume":"149","author":"Wang","year":"2022","journal-title":"Computers in Biology and Medicine"},{"key":"10.1016\/j.iswa.2026.200642_b307","unstructured":"Wei, Y., Fu, H., Li, Y., Xin, Y., Xu, X., Zhou, F., & Zhong, T. Decoding emotional silences: Reliable multimodal sentiment analysis with bipolar uncertainty."},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b308","doi-asserted-by":"crossref","first-page":"10","DOI":"10.1109\/TBC.2022.3215245","article-title":"FV2ES: A fully end2end multimodal system for fast yet effective video emotion recognition inference","volume":"69","author":"Wei","year":"2022","journal-title":"IEEE Transactions on Broadcasting"},{"key":"10.1016\/j.iswa.2026.200642_b309","series-title":"PromptMM: Multi-modal knowledge distillation for recommendation with prompt-tuning","author":"Wei","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b310","series-title":"Proceedings of the 28th ACM SIGKDD conference on knowledge discovery and data mining","first-page":"4153","article-title":"Graph neural networks for multimodal single-cell data integration","author":"Wen","year":"2022"},{"key":"10.1016\/j.iswa.2026.200642_b311","series-title":"Emotion markup language (EmotionML) 1.0","author":"World Wide Web Consortium (W3C)","year":"2014"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b312","doi-asserted-by":"crossref","first-page":"157","DOI":"10.1109\/TAFFC.2023.3263907","article-title":"Transformer-based self-supervised multimodal representation learning for wearable emotion recognition","volume":"15","author":"Wu","year":"2023","journal-title":"IEEE Transactions on Affective Computing"},{"key":"10.1016\/j.iswa.2026.200642_b313","series-title":"Proceedings of the 29th ACM SIGKDD conference on knowledge discovery and data mining","first-page":"2618","article-title":"Recognizing unseen objects via multimodal intensive knowledge graph propagation","author":"Wu","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b314","first-page":"1","article-title":"MLGAT: multi-layer graph attention networks for multimodal emotion recognition in conversations","author":"Wu","year":"2024","journal-title":"Journal of Intelligent Information Systems"},{"key":"10.1016\/j.iswa.2026.200642_b315","doi-asserted-by":"crossref","unstructured":"Wu, D., Yang, D., Zhou, Y., & Ma, C. (2024). Robust multimodal sentiment analysis of image-text pairs by distribution-based feature recovery and fusion. In Proceedings of the 32nd ACM international conference on multimedia (pp. 
5780\u20135789).","DOI":"10.1145\/3664647.3680653"},{"key":"10.1016\/j.iswa.2026.200642_b316","doi-asserted-by":"crossref","first-page":"133180","DOI":"10.1109\/ACCESS.2020.3010311","article-title":"Multimodal fused emotion recognition about expression-EEG interaction and collaboration using deep learning","volume":"8","author":"Wu","year":"2020","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b317","series-title":"2019 8th international conference on affective computing and intelligent interaction","first-page":"648","article-title":"Attending to emotional narratives","author":"Wu","year":"2019"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b318","doi-asserted-by":"crossref","DOI":"10.1088\/1741-2552\/ac49a7","article-title":"Investigating EEG-based functional connectivity patterns for multimodal emotion recognition","volume":"19","author":"Wu","year":"2022","journal-title":"Journal of Neural Engineering"},{"key":"10.1016\/j.iswa.2026.200642_b319","series-title":"Graph learning under distribution shifts: A comprehensive survey on domain adaptation, out-of-distribution, and continual learning","author":"Wu","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b320","doi-asserted-by":"crossref","unstructured":"Xia, B., & Wang, S. (2020). Occluded facial expression recognition with step-wise assistance from unpaired non-occluded images. In Proceedings of the 28th ACM international conference on multimedia (pp. 2927\u20132935).","DOI":"10.1145\/3394171.3413773"},{"key":"10.1016\/j.iswa.2026.200642_b321","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2024.128937","article-title":"Dual-level constraint based distributed graph convolution network for multimodal emotion recognition in conversation","volume":"618","author":"Xiang","year":"2025","journal-title":"Neurocomputing"},{"issue":"14","key":"10.1016\/j.iswa.2026.200642_b322","doi-asserted-by":"crossref","first-page":"4913","DOI":"10.3390\/s21144913","article-title":"Robust multimodal emotion recognition from conversation with transformer-based crossmodality fusion","volume":"21","author":"Xie","year":"2021","journal-title":"Sensors"},{"key":"10.1016\/j.iswa.2026.200642_b323","doi-asserted-by":"crossref","first-page":"3802","DOI":"10.1109\/ACCESS.2019.2961139","article-title":"Intelligent emotion detection method based on deep learning in medical and health data","volume":"8","author":"Xu","year":"2019","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b324","doi-asserted-by":"crossref","DOI":"10.1109\/TAI.2024.3523250","article-title":"A hierarchical cross-modal spatial fusion network for multimodal emotion recognition","author":"Xu","year":"2025","journal-title":"IEEE Transactions on Artificial Intelligence"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b325","doi-asserted-by":"crossref","first-page":"1323","DOI":"10.1007\/s11045-022-00845-9","article-title":"Neural network-based blended ensemble learning for speech emotion recognition","volume":"33","author":"Yalamanchili","year":"2022","journal-title":"Multidimensional Systems and Signal Processing"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b326","doi-asserted-by":"crossref","first-page":"516","DOI":"10.1109\/TNNLS.2021.3097748","article-title":"Deep multiview collaborative clustering","volume":"34","author":"Yang","year":"2023","journal-title":"IEEE Transactions on Neural Networks and Learning 
Systems"},{"issue":"15","key":"10.1016\/j.iswa.2026.200642_b327","doi-asserted-by":"crossref","first-page":"3088","DOI":"10.3390\/electronics14153088","article-title":"MGMR-Net: Mamba-guided multimodal reconstruction and fusion network for sentiment analysis with incomplete modalities","volume":"14","author":"Yang","year":"2025","journal-title":"Electronics"},{"key":"10.1016\/j.iswa.2026.200642_b328","doi-asserted-by":"crossref","unstructured":"Yang, E., Yao, D., Liu, T., & Deng, C. (2022). Mutual Quantization for Cross-Modal Search With Noisy Labels. In Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition (pp. 7551\u20137560).","DOI":"10.1109\/CVPR52688.2022.00740"},{"key":"10.1016\/j.iswa.2026.200642_b329","doi-asserted-by":"crossref","DOI":"10.1016\/j.iot.2023.100971","article-title":"A smart e-health framework for monitoring the health of the elderly and disabled","volume":"24","author":"Yazici","year":"2023","journal-title":"Internet of Things"},{"issue":"2","key":"10.1016\/j.iswa.2026.200642_b330","doi-asserted-by":"crossref","first-page":"9","DOI":"10.1007\/s11280-024-01246-7","article-title":"PriMonitor: an adaptive tuning privacy-preserving approach for multimodal emotion detection","volume":"27","author":"Yin","year":"2024","journal-title":"World Wide Web"},{"key":"10.1016\/j.iswa.2026.200642_b331","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2024.108348","article-title":"Token-disentangling mutual transformer for multimodal emotion recognition","volume":"133","author":"Yin","year":"2024","journal-title":"Engineering Applications of Artificial Intelligence"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b332","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3490686","article-title":"A multimodal framework for large-scale emotion recognition by fusing music and electrodermal activity signals","volume":"18","author":"Yin","year":"2022","journal-title":"ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM)"},{"key":"10.1016\/j.iswa.2026.200642_b333","doi-asserted-by":"crossref","unstructured":"Yu, W., Xu, H., Meng, F., Zhu, Y., Ma, Y., Wu, J., Zou, J., & Yang, K. (2020). Ch-sims: A chinese multimodal sentiment analysis dataset with fine-grained annotation of modality. In Proceedings of the 58th annual meeting of the association for computational linguistics (pp. 3718\u20133727).","DOI":"10.18653\/v1\/2020.acl-main.343"},{"key":"10.1016\/j.iswa.2026.200642_b334","series-title":"Asking multimodal clarifying questions in mixed-initiative conversational search","author":"Yuan","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b335","doi-asserted-by":"crossref","unstructured":"Zadeh, A. B., Liang, P. P., Poria, S., Cambria, E., & Morency, L.-P. (2018). Multimodal language analysis in the wild: Cmu-mosei dataset and interpretable dynamic fusion graph. In Proceedings of the 56th annual meeting of the association for computational linguistics (volume 1: long papers) (pp. 
2236\u20132246).","DOI":"10.18653\/v1\/P18-1208"},{"key":"10.1016\/j.iswa.2026.200642_b336","series-title":"Cross-language speech emotion recognition using multimodal dual attention transformers","author":"Zaidi","year":"2023"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b337","doi-asserted-by":"crossref","first-page":"951","DOI":"10.1007\/s00371-019-01705-7","article-title":"4D facial expression recognition using multimodal time series analysis of geometric landmark-based deformations","volume":"36","author":"Zarbakhsh","year":"2020","journal-title":"Visual Computer"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b338","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3388790","article-title":"Driver emotion recognition for intelligent vehicles: A survey","volume":"53","author":"Zepf","year":"2020","journal-title":"ACM Computing Surveys"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b339","doi-asserted-by":"crossref","first-page":"300","DOI":"10.1109\/TAFFC.2016.2553038","article-title":"BAUM-1: A spontaneous audio-visual face database of affective and mental states","volume":"8","author":"Zhalehpour","year":"2016","journal-title":"IEEE Transactions on Affective Computing"},{"key":"10.1016\/j.iswa.2026.200642_b340","doi-asserted-by":"crossref","first-page":"164130","DOI":"10.1109\/ACCESS.2020.3021994","article-title":"Expression-EEG based collaborative multimodal emotion recognition using deep autoencoder","volume":"8","author":"Zhang","year":"2020","journal-title":"IEEE Access"},{"key":"10.1016\/j.iswa.2026.200642_b341","doi-asserted-by":"crossref","DOI":"10.3389\/fnins.2024.1466013","article-title":"RDA-MTE: an innovative model for emotion recognition in sports behavior decision-making","volume":"18","author":"Zhang","year":"2024","journal-title":"Frontiers in Neuroscience"},{"key":"10.1016\/j.iswa.2026.200642_b342","doi-asserted-by":"crossref","first-page":"5287","DOI":"10.1109\/TIP.2021.3082298","article-title":"Self-training with progressive representation enhancement for unsupervised cross-domain person re-identification","volume":"30","author":"Zhang","year":"2021","journal-title":"IEEE Transactions on Image Processing"},{"key":"10.1016\/j.iswa.2026.200642_b343","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2024.107036","article-title":"A multimodal emotion classification method considering micro-expression information and simulating human visual attention mechanism","volume":"100","author":"Zhang","year":"2025","journal-title":"Biomedical Signal Processing and Control"},{"key":"10.1016\/j.iswa.2026.200642_b344","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2021.107340","article-title":"Combining cross-modal knowledge transfer and semi-supervised learning for speech emotion recognition","volume":"229","author":"Zhang","year":"2021","journal-title":"Knowledge-Based Systems"},{"key":"10.1016\/j.iswa.2026.200642_b345","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2022.103877","article-title":"Emotion recognition using heterogeneous convolutional neural networks combined with multimodal factorized bilinear pooling","volume":"77","author":"Zhang","year":"2022","journal-title":"Biomedical Signal Processing and Control"},{"key":"10.1016\/j.iswa.2026.200642_b346","doi-asserted-by":"crossref","first-page":"7943","DOI":"10.1109\/ACCESS.2021.3049516","article-title":"Multimodal emotion recognition using a hierarchical fusion convolutional neural network","volume":"9","author":"Zhang","year":"2021","journal-title":"IEEE 
Access"},{"issue":"1","key":"10.1016\/j.iswa.2026.200642_b347","first-page":"1","article-title":"M3GAT: A multi-modal, multi-task interactive graph attention network for conversational sentiment analysis and emotion recognition","volume":"42","author":"Zhang","year":"2023","journal-title":"ACM Transactions on Information Systems"},{"key":"10.1016\/j.iswa.2026.200642_b348","series-title":"Proceedings of the 40th international conference on machine learning","article-title":"Improving medical predictions by irregular multimodal electronic health records modeling","author":"Zhang","year":"2023"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b349","doi-asserted-by":"crossref","first-page":"3192","DOI":"10.1109\/TCSVT.2023.3312858","article-title":"Transformer-based multimodal emotional perception for dynamic facial expression recognition in the wild","volume":"34","author":"Zhang","year":"2023","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"key":"10.1016\/j.iswa.2026.200642_b350","doi-asserted-by":"crossref","first-page":"1898","DOI":"10.1109\/LSP.2021.3112314","article-title":"Feature fusion for multimodal emotion recognition based on deep canonical correlation analysis","volume":"28","author":"Zhang","year":"2021","journal-title":"IEEE Signal Processing Letters"},{"key":"10.1016\/j.iswa.2026.200642_b351","series-title":"2016 IEEE symposium series on computational intelligence","first-page":"1","article-title":"\u201cBioVid Emo DB\u201d: A multimodal database for emotion analyses validated by subjective ratings","author":"Zhang","year":"2016"},{"issue":"11","key":"10.1016\/j.iswa.2026.200642_b352","article-title":"Multi-behavioral recommendation model based on dual neural networks and contrast learning","volume":"31","author":"Zhang","year":"2023","journal-title":"Electronic Research Archive"},{"key":"10.1016\/j.iswa.2026.200642_b353","doi-asserted-by":"crossref","DOI":"10.1016\/j.bspc.2023.105052","article-title":"Multimodal emotion recognition based on audio and text by using hybrid attention networks","volume":"85","author":"Zhang","year":"2023","journal-title":"Biomedical Signal Processing and Control"},{"key":"10.1016\/j.iswa.2026.200642_b354","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2023.121692","article-title":"Deep learning-based multimodal emotion recognition from audio, visual, and text modalities: A systematic review of recent advancements and future prospects","volume":"237","author":"Zhang","year":"2024","journal-title":"Expert Systems with Applications"},{"issue":"10","key":"10.1016\/j.iswa.2026.200642_b355","doi-asserted-by":"crossref","first-page":"3030","DOI":"10.1109\/TCSVT.2017.2719043","article-title":"Learning affective features with a hybrid deep model for audio\u2013visual emotion recognition","volume":"28","author":"Zhang","year":"2017","journal-title":"IEEE Transactions on Circuits and Systems for Video Technology"},{"issue":"4","key":"10.1016\/j.iswa.2026.200642_b356","doi-asserted-by":"crossref","DOI":"10.1016\/j.ipm.2024.103730","article-title":"Cross-domain knowledge collaboration for blending-target domain adaptation","volume":"61","author":"Zhang","year":"2024","journal-title":"Information Processing & Management"},{"issue":"14","key":"10.1016\/j.iswa.2026.200642_b357","doi-asserted-by":"crossref","first-page":"7199","DOI":"10.3390\/app12147199","article-title":"An innovative process design model for machined surface error distribution consistency in high-efficiency 
milling","volume":"12","author":"Zhao","year":"2022","journal-title":"Applied Sciences"},{"key":"10.1016\/j.iswa.2026.200642_b358","doi-asserted-by":"crossref","first-page":"19","DOI":"10.1016\/j.aej.2024.10.059","article-title":"IoT-based approach to multimodal music emotion recognition","volume":"113","author":"Zhao","year":"2025","journal-title":"Alexandria Engineering Journal"},{"key":"10.1016\/j.iswa.2026.200642_b359","doi-asserted-by":"crossref","first-page":"52","DOI":"10.1016\/j.neunet.2021.03.013","article-title":"Combining a parallel 2D CNN with a self-attention Dilated Residual Network for CTC-based discrete speech emotion recognition","volume":"141","author":"Zhao","year":"2021","journal-title":"Neural Networks"},{"key":"10.1016\/j.iswa.2026.200642_b360","series-title":"Proceedings of the 29th ACM SIGKDD conference on knowledge discovery and data mining","first-page":"5639","article-title":"Robust multimodal failure detection for microservice systems","author":"Zhao","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b361","series-title":"Multi-level fusion of wav2vec 2.0 and bert for multimodal emotion recognition","author":"Zhao","year":"2022"},{"key":"10.1016\/j.iswa.2026.200642_b362","series-title":"ICASSP 2023-2023 IEEE international conference on acoustics, speech and signal processing","first-page":"1","article-title":"Knowledge-aware bayesian co-attention for multimodal emotion recognition","author":"Zhao","year":"2023"},{"key":"10.1016\/j.iswa.2026.200642_b363","doi-asserted-by":"crossref","DOI":"10.1016\/j.iot.2024.101069","article-title":"A multimodal teacher speech emotion recognition method in the smart classroom","volume":"25","author":"Zhao","year":"2024","journal-title":"Internet of Things"},{"key":"10.1016\/j.iswa.2026.200642_b364","series-title":"Multi-modal causal structure learning and root cause analysis","author":"Zheng","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b365","series-title":"2014 36th annual international conference of the IEEE engineering in medicine and biology society","first-page":"5040","article-title":"Multimodal emotion recognition using EEG and eye tracking data","author":"Zheng","year":"2014"},{"key":"10.1016\/j.iswa.2026.200642_b366","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2023.122728","article-title":"DJMF: A discriminative joint multi-task framework for multimodal sentiment analysis based on intra-and inter-task dynamics","volume":"242","author":"Zheng","year":"2024","journal-title":"Expert Systems with Applications"},{"issue":"3","key":"10.1016\/j.iswa.2026.200642_b367","doi-asserted-by":"crossref","first-page":"1110","DOI":"10.1109\/TCYB.2018.2797176","article-title":"Emotionmeter: A multimodal framework for recognizing human emotions","volume":"49","author":"Zheng","year":"2018","journal-title":"IEEE Transactions on Cybernetics"},{"key":"10.1016\/j.iswa.2026.200642_b368","series-title":"EventDance: Unsupervised source-free cross-modal adaptation for event-based object recognition","author":"Zheng","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b369","series-title":"Proceedings of the 28th ACM SIGKDD conference on knowledge discovery and data mining","first-page":"2594","article-title":"Contrastive learning with complex heterogeneity","author":"Zheng","year":"2022"},{"key":"10.1016\/j.iswa.2026.200642_b370","doi-asserted-by":"crossref","first-page":"2617","DOI":"10.1109\/TASLP.2021.3096037","article-title":"Information fusion in attention networks using adaptive and multi-level factorized bilinear pooling for 
audio-visual emotion recognition","volume":"29","author":"Zhou","year":"2021","journal-title":"IEEE\/ACM Transactions on Audio, Speech, and Language Processing"},{"issue":"5","key":"10.1016\/j.iswa.2026.200642_b371","doi-asserted-by":"crossref","DOI":"10.18280\/ts.390503","article-title":"Emotion recognition of college students based on audio and video image","volume":"39","author":"Zhu","year":"2022","journal-title":"Traitement du Signal"},{"key":"10.1016\/j.iswa.2026.200642_b372","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2025.103268","article-title":"RMER-DT: Robust multimodal emotion recognition in conversational contexts based on diffusion and transformers","author":"Zhu","year":"2025","journal-title":"Information Fusion"},{"key":"10.1016\/j.iswa.2026.200642_b373","series-title":"International conference on human-computer interaction","first-page":"82","article-title":"DriveSense: A multi-modal emotion recognition and regulation system for a car driver","author":"Zhu","year":"2024"},{"key":"10.1016\/j.iswa.2026.200642_b374","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2022.109978","article-title":"Improving multimodal fusion with Main Modal Transformer for emotion recognition in conversation","volume":"258","author":"Zou","year":"2022","journal-title":"Knowledge-Based Systems"},{"key":"10.1016\/j.iswa.2026.200642_b375","series-title":"ICASSP 2023-2023 IEEE international conference on acoustics, speech and signal processing","first-page":"1","article-title":"Exploiting modality-invariant feature for robust multimodal emotion recognition with missing modalities","author":"Zuo","year":"2023"}],"container-title":["Intelligent Systems with Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S2667305326000177?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S2667305326000177?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T18:38:43Z","timestamp":1775155123000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S2667305326000177"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,5]]},"references-count":375,"alternative-id":["S2667305326000177"],"URL":"https:\/\/doi.org\/10.1016\/j.iswa.2026.200642","relation":{},"ISSN":["2667-3053"],"issn-type":[{"value":"2667-3053","type":"print"}],"subject":[],"published":{"date-parts":[[2026,5]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"State-of-the-art Multimodal Emotion Recognition: A comprehensive survey and taxonomy","name":"articletitle","label":"Article Title"},{"value":"Intelligent Systems with Applications","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.iswa.2026.200642","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 The Authors. Published by Elsevier Ltd.","name":"copyright","label":"Copyright"}],"article-number":"200642"}}