{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,8,7]],"date-time":"2025-08-07T21:17:46Z","timestamp":1754601466971,"version":"3.32.0"},"reference-count":21,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T00:00:00Z","timestamp":1730937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,11,7]],"date-time":"2024-11-07T00:00:00Z","timestamp":1730937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,11,7]]},"DOI":"10.1109\/iscslp63861.2024.10800234","type":"proceedings-article","created":{"date-parts":[[2024,12,23]],"date-time":"2024-12-23T19:11:17Z","timestamp":1734981077000},"page":"636-640","source":"Crossref","is-referenced-by-count":1,"title":["Improving Emotion Recognition with Pre-Trained Models, Multimodality, and Contextual Information"],"prefix":"10.1109","author":[{"given":"Zhengshun","family":"Xia","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence, AI Institute, X-LANCE Lab"}]},{"given":"Ziyang","family":"Ma","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence, AI Institute, X-LANCE Lab"}]},{"given":"Zhisheng","family":"Zheng","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence, AI Institute, X-LANCE Lab"}]},{"given":"Xie","family":"Chen","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence, AI Institute, X-LANCE Lab"}]}],"member":"263","reference":[{"issue":"2","key":"ref1","first-page":"45","article-title":"Multi-modal emotion recognition from speech and text","volume":"9","author":"Chuang","year":"2004","journal-title":"International Journal of Computational Linguistics & Chinese Language Processing"},{"key":"ref2","article-title":"Multi-modal emotion recognition on iemocap with neural networks","author":"Tripathi","year":"2018","journal-title":"arXiv preprint"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.996"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639633"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3610661.3616189"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2916866"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.01024"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1167"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1705"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.naacl-main.306"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9533718"},{"key":"ref12","article-title":"emotion2vec: Self-supervised pre-training for speech emotion representation","author":"Ma","year":"2023","journal-title":"arXiv preprint"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1236"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref15","first-page":"1298","article-title":"Data2vec: A general framework for self-supervised learning in speech, vision and language","volume-title":"International Conference on Machine Learning","author":"Baevski","year":"2022"},{"key":"ref16","first-page":"1416","article-title":"Efficient selfsupervised learning with contextualized target representations for vision, speech and language","volume-title":"International Conference on Machine Learning","author":"Baevski","year":"2023"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref19","article-title":"Bert: Pretraining of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv preprint"},{"key":"ref20","article-title":"Xlnet: Generalized autoregressive pretraining for language understanding","volume":"32","author":"Yang","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-84186-7_31"}],"event":{"name":"2024 IEEE 14th International Symposium on Chinese Spoken Language Processing (ISCSLP)","start":{"date-parts":[[2024,11,7]]},"location":"Beijing, China","end":{"date-parts":[[2024,11,10]]}},"container-title":["2024 IEEE 14th International Symposium on Chinese Spoken Language Processing (ISCSLP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10799944\/10799969\/10800234.pdf?arnumber=10800234","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,12,24]],"date-time":"2024-12-24T06:33:25Z","timestamp":1735022005000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10800234\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,7]]},"references-count":21,"URL":"https:\/\/doi.org\/10.1109\/iscslp63861.2024.10800234","relation":{},"subject":[],"published":{"date-parts":[[2024,11,7]]}}}