{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T21:05:49Z","timestamp":1776891949379,"version":"3.51.2"},"reference-count":53,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,6,1]],"date-time":"2026-06-01T00:00:00Z","timestamp":1780272000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100002701","name":"Ministry of Education","doi-asserted-by":"publisher","award":["24YJA870003"],"award-info":[{"award-number":["24YJA870003"]}],"id":[{"id":"10.13039\/501100002701","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neurocomputing"],"published-print":{"date-parts":[[2026,6]]},"DOI":"10.1016\/j.neucom.2026.133424","type":"journal-article","created":{"date-parts":[[2026,3,24]],"date-time":"2026-03-24T16:03:10Z","timestamp":1774368190000},"page":"133424","update-policy":"https:\/\/doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":0,"special_numbering":"C","title":["MMCGR: A multimodal cascaded gated fusion RWKV-based model for emotion recognition in conversations"],"prefix":"10.1016","volume":"682","author":[{"given":"Bo","family":"He","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0009-9574-3627","authenticated-orcid":false,"given":"Hao","family":"Han","sequence":"additional","affiliation":[]},{"given":"Ruoyu","family":"Zhao","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.neucom.2026.133424_bib0005","series-title":"2015 Eighth International Conference on Contemporary Computing (IC3)","first-page":"285","article-title":"Emotion analysis of Twitter using opinion mining","author":"Kumar","year":"2015"},{"key":"10.1016\/j.neucom.2026.133424_bib0010","doi-asserted-by":"crossref","first-page":"100943","DOI":"10.1109\/ACCESS.2019.2929050","article-title":"Emotion recognition in conversation: research challenges, datasets, and recent advances","volume":"7","author":"Poria","year":"2019","journal-title":"IEEE Access"},{"issue":"5","key":"10.1016\/j.neucom.2026.133424_bib0015","first-page":"8002","article-title":"Real-Time emotion recognition via attention 
gated hierarchical memory network","volume":"34","author":"Jiao","year":"2020","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.neucom.2026.133424_bib0020","author":"Kim"},{"key":"10.1016\/j.neucom.2026.133424_bib0025","series-title":"Proceedings of the AAAI Conference on Artificial Intelligence","first-page":"6818","article-title":"Dialoguernn: an attentive RNN for emotion detection in conversations","volume":"vol. 33","author":"Majumder","year":"2019"},{"key":"10.1016\/j.neucom.2026.133424_bib0030","series-title":"Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","first-page":"5669","article-title":"CoMPM: context modeling with speaker\u2019s pre-trained memory tracking for emotion recognition in conversation","author":"Lee","year":"2022"},{"key":"10.1016\/j.neucom.2026.133424_bib0035","series-title":"Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies","first-page":"4148","article-title":"COGMEN: COntextualized GNN based multimodal emotion recognition","author":"Joshi","year":"2022"},{"issue":"11","key":"10.1016\/j.neucom.2026.133424_bib0040","first-page":"13121","article-title":"SKIER: a symbolic knowledge integrated model for conversational emotion recognition","volume":"37","author":"Li","year":"2023","journal-title":"Proc. AAAI Conf. Artif. Intell."},{"key":"10.1016\/j.neucom.2026.133424_bib0045","series-title":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","first-page":"1","article-title":"MGAT: Multi-Granularity attention based transformers for Multi-Modal emotion recognition","author":"Fan","year":"2023"},{"key":"10.1016\/j.neucom.2026.133424_bib0050","series-title":"Findings of the Association for Computational Linguistics: EMNLP 2023","first-page":"14048","article-title":"RWKV: reinventing RNNs for the transformer era","author":"Peng","year":"2023"},{"key":"10.1016\/j.neucom.2026.133424_bib0055","author":"Peng"},{"key":"10.1016\/j.neucom.2026.133424_bib0060","series-title":"Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","first-page":"873","article-title":"Context-dependent sentiment analysis in user-generated videos","author":"Poria","year":"2017"},{"key":"10.1016\/j.neucom.2026.133424_bib0065","series-title":"Proceedings of the Conference. Association for Computational Linguistics. North American Chapter. Meeting","first-page":"2122","article-title":"Conversational memory network for emotion recognition in dyadic dialogue videos","volume":"vol. 
2018","author":"Hazarika","year":"2018"},{"key":"10.1016\/j.neucom.2026.133424_bib0070","series-title":"Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)","first-page":"5666","article-title":"MMGCN: multimodal fusion via deep graph convolution network for emotion recognition in conversation","author":"Hu","year":"2021"},{"key":"10.1016\/j.neucom.2026.133424_bib0075","series-title":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","first-page":"7037","article-title":"MM-DFN: multimodal dynamic fusion network for emotion recognition in conversations","author":"Hu","year":"2022"},{"key":"10.1016\/j.neucom.2026.133424_bib0080","doi-asserted-by":"crossref","first-page":"2325","DOI":"10.1109\/TASLP.2023.3284509","article-title":"RBA-GCN: relational bilevel aggregation graph convolutional network for emotion recognition","volume":"31","author":"Yuan","year":"2023","journal-title":"IEEE\/ACM Trans. Audio Speech Lang. Process."},{"key":"10.1016\/j.neucom.2026.133424_bib0085","series-title":"Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)","first-page":"82","article-title":"TelME: teacher-leading multimodal fusion network for emotion recognition in conversation","author":"Yun","year":"2024"},{"key":"10.1016\/j.neucom.2026.133424_bib0090","series-title":"Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing","first-page":"2594","article-title":"Icon: interactive conversational memory network for multimodal emotion detection","author":"Hazarika","year":"2018"},{"key":"10.1016\/j.neucom.2026.133424_bib0095","author":"Ghosal"},{"key":"10.1016\/j.neucom.2026.133424_bib0100","doi-asserted-by":"crossref","first-page":"4422","DOI":"10.1109\/TMM.2021.3117062","article-title":"LR-GCN: latent relation-aware graph convolutional network for conversational emotion recognition","volume":"24","author":"Ren","year":"2021","journal-title":"IEEE Trans. 
Multimedia"},{"key":"10.1016\/j.neucom.2026.133424_bib0105","series-title":"2022 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)","first-page":"4651","article-title":"M2Fnet: multi-modal fusion network for emotion recognition in conversation","author":"Chudasama","year":"2022"},{"key":"10.1016\/j.neucom.2026.133424_bib0110","series-title":"Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","first-page":"13099","article-title":"A Cross-Modality context fusion and semantic refinement network for emotion recognition in conversation","author":"Zhang","year":"2023"},{"key":"10.1016\/j.neucom.2026.133424_bib0115","series-title":"Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)","first-page":"14752","article-title":"MultiEMO: an Attention-Based Correlation-Aware multimodal fusion framework for emotion recognition in conversations","author":"Shi","year":"2023"},{"key":"10.1016\/j.neucom.2026.133424_bib0120","author":"Duan"},{"key":"10.1016\/j.neucom.2026.133424_bib0125","author":"Yang"},{"key":"10.1016\/j.neucom.2026.133424_bib0130","author":"Hou"},{"key":"10.1016\/j.neucom.2026.133424_bib0135","series-title":"ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","first-page":"1","article-title":"HFE-RWKV: High-Frequency enhanced RWKV model for efficient left ventricle segmentation in pediatric echocardiograms","author":"Ye","year":"2025"},{"key":"10.1016\/j.neucom.2026.133424_bib0140","author":"Liu"},{"key":"10.1016\/j.neucom.2026.133424_bib0145","author":"Ghosal"},{"issue":"2","key":"10.1016\/j.neucom.2026.133424_bib0150","doi-asserted-by":"crossref","first-page":"343","DOI":"10.1007\/s13042-020-01175-7","article-title":"Cross-domain sentiment aware word embeddings for review sentiment analysis","volume":"12","author":"Liu","year":"2021","journal-title":"Int. J. Mach. Learn. Cybern."},{"key":"10.1016\/j.neucom.2026.133424_bib0155","series-title":"Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics","first-page":"2506","article-title":"Multi-Modal sarcasm detection in Twitter with hierarchical fusion model","author":"Cai","year":"2019"},{"key":"10.1016\/j.neucom.2026.133424_bib0160","series-title":"Second Grand-Challenge and Workshop on Multimodal Language (Challenge-HML)","first-page":"29","article-title":"Low rank fusion based transformers for multimodal sequences","author":"Sahay","year":"2020"},{"key":"10.1016\/j.neucom.2026.133424_bib0165","doi-asserted-by":"crossref","DOI":"10.1016\/j.knosys.2022.110021","article-title":"Sentiment-aware multimodal pre-training for multimodal sentiment analysis","volume":"258","author":"Ye","year":"2022","journal-title":"Knowl.-Based Syst."},{"key":"10.1016\/j.neucom.2026.133424_bib0170","doi-asserted-by":"crossref","first-page":"3054","DOI":"10.1109\/TIP.2023.3277791","article-title":"Multi-Modal mutual attention and iterative interaction for referring image segmentation","volume":"32","author":"Liu","year":"2023","journal-title":"IEEE Trans. 
Image Process."},{"key":"10.1016\/j.neucom.2026.133424_bib0175","series-title":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW)","first-page":"6537","article-title":"TransFusion: multi-modal fusion network for semantic segmentation","author":"Maiti","year":"2023"},{"key":"10.1016\/j.neucom.2026.133424_bib0180","doi-asserted-by":"crossref","DOI":"10.1016\/j.inffus.2024.102306","article-title":"Fusing pairwise modalities for emotion recognition in conversations","volume":"106","author":"Fan","year":"2024","journal-title":"Inf. Fusion."},{"key":"10.1016\/j.neucom.2026.133424_bib0185","doi-asserted-by":"crossref","DOI":"10.1016\/j.eswa.2023.122946","article-title":"MSER: multimodal speech emotion recognition using cross-attention with deep fusion","volume":"245","author":"Khan","year":"2024","journal-title":"Expert Syst. Appl."},{"key":"10.1016\/j.neucom.2026.133424_bib0190","series-title":"2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"2818","article-title":"Rethinking the inception architecture for computer vision","author":"Szegedy","year":"2016"},{"key":"10.1016\/j.neucom.2026.133424_bib0195","doi-asserted-by":"crossref","first-page":"5984","DOI":"10.1109\/TIP.2021.3089942","article-title":"Delving deep into label smoothing","volume":"30","author":"Zhang","year":"2021","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.neucom.2026.133424_bib0200","series-title":"2018 IEEE\/CVF Conference on Computer Vision and Pattern Recognition","first-page":"7482","article-title":"Multi-task learning using uncertainty to weigh losses for scene geometry and semantics","author":"Cipolla","year":"2018"},{"key":"10.1016\/j.neucom.2026.133424_bib0205","series-title":"Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)","first-page":"7042","article-title":"DialogueCRN: contextual reasoning networks for emotion recognition in conversations","author":"Hu","year":"2021"},{"key":"10.1016\/j.neucom.2026.133424_bib0210","doi-asserted-by":"crossref","first-page":"77","DOI":"10.1109\/TMM.2023.3260635","article-title":"GraphCFC: a directed graph based Cross-Modal feature complementation approach for multimodal conversational emotion recognition","volume":"26","author":"Li","year":"2024","journal-title":"IEEE Trans. 
Multimedia"},{"key":"10.1016\/j.neucom.2026.133424_bib0215","series-title":"Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing","first-page":"16051","article-title":"Joyful: joint modality fusion and graph contrastive learning for multimoda emotion recognition","author":"Li","year":"2023"},{"key":"10.1016\/j.neucom.2026.133424_bib0220","series-title":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","first-page":"10761","article-title":"Multivariate, Multi-Frequency and multimodal: rethinking graph neural networks for emotion recognition in conversation","author":"Chen","year":"2023"},{"key":"10.1016\/j.neucom.2026.133424_bib0225","series-title":"Proceedings of the 32nd ACM International Conference on Multimedia","first-page":"4341","article-title":"Multimodal fusion via hypergraph autoencoder and contrastive learning for emotion recognition in conversation","author":"Yi","year":"2024"},{"key":"10.1016\/j.neucom.2026.133424_bib0230","series-title":"Proceedings of the 32nd ACM International Conference on Multimedia","first-page":"4795","article-title":"DQ-former: querying transformer with dynamic modality priority for cognitive-aligned multimodal emotion recognition in conversation","author":"Jing","year":"2024"},{"key":"10.1016\/j.neucom.2026.133424_bib0235","doi-asserted-by":"crossref","DOI":"10.1016\/j.neucom.2024.128937","article-title":"Dual-level constraint based distributed graph convolution network for multimodal emotion recognition in conversation","volume":"618","author":"Xiang","year":"2025","journal-title":"Neurocomputing"},{"key":"10.1016\/j.neucom.2026.133424_bib0240","doi-asserted-by":"crossref","first-page":"192","DOI":"10.1016\/j.patrec.2025.02.024","article-title":"Multi-corpus emotion recognition method based on cross-modal gated attention fusion","volume":"190","author":"Ryumina","year":"2025","journal-title":"Pattern Recognit. Lett."},{"key":"10.1016\/j.neucom.2026.133424_bib0245","author":"Loshchilov"},{"issue":"4","key":"10.1016\/j.neucom.2026.133424_bib0250","doi-asserted-by":"crossref","first-page":"312","DOI":"10.1016\/j.icte.2020.04.010","article-title":"The effect of batch size on the generalizability of the convolutional neural networks on a histopathology dataset","volume":"6","author":"Kandel","year":"2020","journal-title":"ICT Express"},{"issue":"11","key":"10.1016\/j.neucom.2026.133424_bib0255","article-title":"Visualizing data using t-SNE","volume":"9","author":"Van der Maaten","year":"2008","journal-title":"J. Mach. Learn. 
Res."},{"key":"10.1016\/j.neucom.2026.133424_bib0260","author":"Choromanski"},{"key":"10.1016\/j.neucom.2026.133424_bib0265","author":"Gu"}],"container-title":["Neurocomputing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231226008210?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0925231226008210?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T20:34:16Z","timestamp":1776890056000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0925231226008210"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,6]]},"references-count":53,"alternative-id":["S0925231226008210"],"URL":"https:\/\/doi.org\/10.1016\/j.neucom.2026.133424","relation":{},"ISSN":["0925-2312"],"issn-type":[{"value":"0925-2312","type":"print"}],"subject":[],"published":{"date-parts":[[2026,6]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"MMCGR: A multimodal cascaded gated fusion RWKV-based model for emotion recognition in conversations","name":"articletitle","label":"Article Title"},{"value":"Neurocomputing","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neucom.2026.133424","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2026 Elsevier B.V. All rights are reserved, including those for text and data mining, AI training, and similar technologies.","name":"copyright","label":"Copyright"}],"article-number":"133424"}}