{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,26]],"date-time":"2026-03-26T21:46:46Z","timestamp":1774561606937,"version":"3.50.1"},"reference-count":34,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"DOI":"10.13039\/501100003725","name":"National Research Foundation of Korea","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003621","name":"Korea Government","doi-asserted-by":"publisher","award":["NRF-2020R1A4A1019191"],"award-info":[{"award-number":["NRF-2020R1A4A1019191"]}],"id":[{"id":"10.13039\/501100003621","id-type":"DOI","asserted-by":"publisher"}]},{"name":"BK21 Fostering Outstanding Universities for Research"},{"name":"Ministry of Education (MOE), South Korea"},{"DOI":"10.13039\/501100003725","name":"NRF","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100003725","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/access.2023.3244390","type":"journal-article","created":{"date-parts":[[2023,2,16]],"date-time":"2023-02-16T21:13:03Z","timestamp":1676581983000},"page":"14742-14751","source":"Crossref","is-referenced-by-count":71,"title":["Multi-Label Multimodal Emotion Recognition With Transformer-Based Fusion and Emotion-Level Representation Learning"],"prefix":"10.1109","volume":"11","author":[{"given":"Hoai-Duy","family":"Le","sequence":"first","affiliation":[{"name":"Department of Artificial Intelligence Convergence, Chonnam National University, Gwangju, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8756-1382","authenticated-orcid":false,"given":"Guee-Sang","family":"Lee","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence Convergence, Chonnam National University, Gwangju, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3575-5035","authenticated-orcid":false,"given":"Soo-Hyung","family":"Kim","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence Convergence, Chonnam National University, Gwangju, South Korea"}]},{"given":"Seungwon","family":"Kim","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence Convergence, Chonnam National University, Gwangju, South Korea"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3024-5060","authenticated-orcid":false,"given":"Hyung-Jeong","family":"Yang","sequence":"additional","affiliation":[{"name":"Department of Artificial Intelligence Convergence, Chonnam National University, Gwangju, South Korea"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-018-6445-z"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.4018\/978-1-6684-6303-1.ch098"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/2993148.2997629"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/SMC.2019.8914655"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D15-1166"},{"key":"ref6","article-title":"Attention is all you need","volume-title":"Advances in Neural Information Processing Systems","volume":"30","author":"Vaswani","year":"2017"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1656"},{"key":"ref8","first-page":"14200","article-title":"Attention bottlenecks for multimodal fusion","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Nagrani"},{"key":"ref9","first-page":"24206","article-title":"VATT: Transformers for multimodal self-supervised learning from raw video, audio and text","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"34","author":"Akbari"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01939"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref12","first-page":"2236","article-title":"Multimodal language analysis in the wild: CMU-MOSEI dataset and interpretable dynamic fusion graph","volume-title":"Proc. 56th Annu. Meeting Assoc. Comput. Linguistics","volume":"1","author":"Bagher Zadeh"},{"key":"ref13","article-title":"Modality-transferable emotion embeddings for low-resource multimodal emotion recognition","author":"Dai","year":"2020","journal-title":"arXiv:2009.09629"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413678"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3462244.3479919"},{"key":"ref16","first-page":"9180","article-title":"Improving multimodal fusion with hierarchical mutual information maximization for multimodal sentiment analysis","volume-title":"Proc. Conf. Empirical Methods Natural Lang. Process.","author":"Han"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1115"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1209"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.challengehml-1.3"},{"key":"ref20","first-page":"5305","article-title":"Multimodal end-to-end sparse model for emotion recognition","volume-title":"Proc. Conf. North Amer. Chapter Assoc. Comput. Linguistics, Human Lang. Technol.","author":"Dai"},{"key":"ref21","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv:2010.11929"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21315"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1514"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413869"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20044-1_28"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6964"},{"key":"ref28","article-title":"Query2Label: A simple transformer way to multi-label classification","author":"Liu","year":"2021","journal-title":"arXiv:2107.10834"},{"key":"ref29","first-page":"1","article-title":"PyTorch: An imperative style, high-performance deep learning library","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Paszke"},{"key":"ref30","first-page":"1","article-title":"ALBERT: A lite BERT for self-supervised learning of language representations","volume-title":"Proc. 8th Int. Conf. Learn. Represent. (ICLR)","author":"Lan"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICISCE.2017.95"},{"key":"ref32","article-title":"Very deep convolutional networks for large-scale image recognition","author":"Simonyan","year":"2014","journal-title":"arXiv:1409.1556"},{"key":"ref33","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref34","article-title":"SGDR: Stochastic gradient descent with warm restarts","author":"Loshchilov","year":"2016","journal-title":"arXiv:1608.03983"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6287639\/10005208\/10042438.pdf?arnumber=10042438","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,13]],"date-time":"2024-02-13T14:47:11Z","timestamp":1707835631000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10042438\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/access.2023.3244390","relation":{},"ISSN":["2169-3536"],"issn-type":[{"value":"2169-3536","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]}}}