{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T07:00:00Z","timestamp":1775199600021,"version":"3.50.1"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434608","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-5","source":"Crossref","is-referenced-by-count":0,"title":["Qieemo: Multimodal Emotion Recognition Based on the ASR Backbone"],"prefix":"10.1109","author":[{"given":"Jinming","family":"Chen","sequence":"first","affiliation":[{"name":"Qifu Technology,Shanghai,China"}]},{"given":"Jingyi","family":"Fang","sequence":"additional","affiliation":[{"name":"Qifu Technology,Shanghai,China"}]},{"given":"Yuanzhong","family":"Zheng","sequence":"additional","affiliation":[{"name":"Qifu Technology,Shanghai,China"}]},{"given":"Yaoxuan","family":"Wang","sequence":"additional","affiliation":[{"name":"Qifu Technology,Shanghai,China"}]},{"given":"Haojun","family":"Fei","sequence":"additional","affiliation":[{"name":"Qifu Technology,Shanghai,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2019.2929050"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.102218"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/SLT61566.2024.10832143"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/JBHI.2024.3392564"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414286"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1477"},{"key":"ref7","article-title":"Speech emotion recognition via cnn-transformer and multidimensional attention mechanism","author":"Tang","year":"2024","journal-title":"arXiv preprint arXiv:2403.04743"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/icassp48485.2024.10447232"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747095"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/s11227-024-06158-x"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446812"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICME57554.2024.10688053"},{"key":"ref13","article-title":"Efficient conformer-based speech recognition with linear attention","volume-title":"arXiv: Sound, arXiv: Sound","author":"Li"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/s10579-008-9076-6"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414006"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.931"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447232"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096709"},{"key":"ref19","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","author":"Baevski","year":"2020"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3188113"},{"key":"ref22","first-page":"1416","article-title":"Efficient self-supervised learning with contextualized target representations for vision, speech and language","volume-title":"International Conference on Machine Learning","author":"Baevski"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-3015"},{"key":"ref24","article-title":"Funaudiollm: Voice understanding and generation foundation models for natural interaction between humans and llms","author":"SpeechTeam","year":"2024","journal-title":"arXiv preprint arXiv:2407.04051"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICPRS62101.2024.10677820"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.naacl-long.5"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434608.pdf?arnumber=11434608","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:56:45Z","timestamp":1775192205000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434608\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434608","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}