{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T22:40:30Z","timestamp":1773528030342,"version":"3.50.1"},"reference-count":39,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"National Research Foundation of Korea","award":["RS-2023-00219107"],"award-info":[{"award-number":["RS-2023-00219107"]}]},{"name":"Institute of Information & communications Technology Planning & Evaluation","award":["IITP-2023-RS-2023-00256629"],"award-info":[{"award-number":["IITP-2023-RS-2023-00256629"]}]},{"DOI":"10.13039\/501100014188","name":"Ministry of Science and ICT, South Korea","doi-asserted-by":"publisher","award":["IITP-2023-RS-2022-00156287"],"award-info":[{"award-number":["IITP-2023-RS-2022-00156287"]}],"id":[{"id":"10.13039\/501100014188","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2023.3348518","type":"journal-article","created":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T19:51:03Z","timestamp":1704138663000},"page":"2349-2360","source":"Crossref","is-referenced-by-count":10,"title":["Residual Relation-Aware Attention Deep Graph-Recurrent Model for Emotion Recognition in Conversation"],"prefix":"10.1109","volume":"12","author":[{"given":"Anh-Quang","family":"Duong","sequence":"first","affiliation":[{"name":"Department of Artificial Intelligent Convergence, Chonnam National University, Gwangju, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7539-2016","authenticated-orcid":false,"given":"Ngoc-Huynh","family":"Ho","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligent Convergence, Chonnam National University, Gwangju, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2385-9673","authenticated-orcid":false,"given":"Sudarshan","family":"Pant","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligent Convergence, Chonnam National University, Gwangju, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4221-3058","authenticated-orcid":false,"given":"Seungwon","family":"Kim","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligent Convergence, Chonnam National University, Gwangju, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3575-5035","authenticated-orcid":false,"given":"Soo-Hyung","family":"Kim","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligent Convergence, Chonnam National University, Gwangju, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3024-5060","authenticated-orcid":false,"given":"Hyung-Jeong","family":"Yang","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligent Convergence, Chonnam National University, Gwangju, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1015"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.597"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.findings-emnlp.224"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.125"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.123"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/W14-4012"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1017\/cbo9781139833813"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/B978-0-12-558701-3.50007-7"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.dss.2018.09.002"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683293"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.3115\/1621474.1621487"},{"key":"ref12","first-page":"40","article-title":"WordNet affect: An affective extension of WordNet","volume-title":"Proc. LREC","volume":"4","author":"Strapparava"},{"key":"ref13","first-page":"26","article-title":"Emotions evoked by common words and phrases: Using mechanical Turk to create an emotion lexicon","volume-title":"Proc. NAACL HLT","author":"Mohammad"},{"key":"ref14","article-title":"MELD: A multimodal multi-party dataset for emotion recognition in conversations","author":"Poria","year":"2018","journal-title":"arXiv:1810.02508"},{"key":"ref15","first-page":"44","article-title":"Emotion detection on tv show transcripts with sequence-based convolutional neural networks","volume-title":"Proc. AAAI Workshop Affect. Content Anal.","author":"Zahiri"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref17","article-title":"DailyDialog: A manually labelled multi-turn dialogue dataset","author":"Li","year":"2017","journal-title":"arXiv:1710.03957"},{"key":"ref18","article-title":"HiGRU: Hierarchical gated recurrent units for utterance-level emotion recognition","author":"Jiao","year":"2019","journal-title":"arXiv:1904.04446"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016818"},{"key":"ref20","article-title":"A hierarchical transformer with speaker modeling for emotion recognition in conversation","author":"Li","year":"2020","journal-title":"arXiv:2012.14781"},{"key":"ref21","article-title":"EmoBERTa: Speaker-aware emotion recognition in conversation with RoBERTa","author":"Kim","year":"2021","journal-title":"arXiv:2108.12009"},{"key":"ref22","article-title":"RoBERTa: A robustly optimized BERT pretraining approach","author":"Liu","year":"2019","journal-title":"arXiv:1907.11692"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2019\/752"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i15.17625"},{"key":"ref25","first-page":"5753","article-title":"XLNet: Generalized autoregressive pretraining for language understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Yang"},{"key":"ref26","article-title":"S+PAGE: A speaker and position-aware graph neural network model for emotion recognition in conversation","author":"Liang","year":"2021","journal-title":"arXiv:2112.12389"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.394"},{"key":"ref28","article-title":"Directed acyclic graph neural networks","author":"Thost","year":"2021","journal-title":"arXiv:2101.07965"},{"key":"ref29","article-title":"A note on over-smoothing for graph neural networks","author":"Cai","year":"2020","journal-title":"arXiv:2006.13318"},{"key":"ref30","article-title":"EmotionLines: An emotion corpus of multi-party conversations","author":"Chen","year":"2018","journal-title":"arXiv:1802.08379"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TAI.2022.3149234"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2021.107751"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747397"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2023.110285"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3260635"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2022.10.009"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/KSE59128.2023.10299463"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.26599\/TST.2023.9010021"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096161"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/10380310\/10378668.pdf?arnumber=10378668","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,13]],"date-time":"2024-01-13T00:14:46Z","timestamp":1705104886000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10378668\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/access.2023.3348518","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}