{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T07:10:35Z","timestamp":1775200235125,"version":"3.50.1"},"reference-count":44,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434762","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["EMO-Reasoning: Benchmarking Emotional Reasoning Capabilities in Spoken Dialogue Systems"],"prefix":"10.1109","author":[{"given":"Jingwen","family":"Liu","sequence":"first","affiliation":[{"name":"Zhejiang University"}]},{"given":"Kan Jen","family":"Cheng","sequence":"additional","affiliation":[{"name":"UC Berkeley"}]},{"given":"Jiachen","family":"Lian","sequence":"additional","affiliation":[{"name":"UC Berkeley"}]},{"given":"Akshay","family":"Anand","sequence":"additional","affiliation":[{"name":"UC Berkeley"}]},{"given":"Rishi","family":"Jain","sequence":"additional","affiliation":[{"name":"UC Berkeley"}]},{"given":"Faith","family":"Qiao","sequence":"additional","affiliation":[{"name":"UC Berkeley"}]},{"given":"Robin","family":"Netzorg","sequence":"additional","affiliation":[{"name":"UC Berkeley"}]},{"given":"Huang-Cheng","family":"Chou","sequence":"additional","affiliation":[{"name":"University of Southern 
California"}]},{"given":"Tingle","family":"Li","sequence":"additional","affiliation":[{"name":"UC Berkeley"}]},{"family":"Guan-Ting","sequence":"additional","affiliation":[{"name":"National Taiwan University"}]},{"given":"Gopala","family":"Anumanchipalli","sequence":"additional","affiliation":[{"name":"Zhejiang University"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.3389\/fpsyg.2013.00184"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1177\/00238309020450030301"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.52202\/075280-2771"},{"key":"ref4","article-title":"Spoken question answering and speech continuation using spectrogram-powered llm","author":"Nachmani","year":"2023","journal-title":"arXiv preprint arXiv:2305.15255"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2024-1514"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.997"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-612"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448257"},{"key":"ref9","article-title":"Dynamic-superb phase-2: A collaboratively expanding benchmark for measuring the capabilities of spoken language models with 180 tasks","author":"Huang","year":"2024","journal-title":"arXiv preprint arXiv:2411.05361"},{"key":"ref10","article-title":"Voxdialogue: Can spoken dialogue systems understand information beyond words?","volume-title":"The Thirteenth International Conference on Learning Representations","author":"Cheng"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1813"},{"key":"ref12","first-page":"6626","article-title":"Advancing large language models to capture varied speaking styles and respond properly in spoken conversations","volume-title":"Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long 
Papers)","author":"Lin"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.782"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446933"},{"key":"ref15","article-title":"Full-duplex-bench: A benchmark to evaluate full-duplex spoken dialogue models on turn-taking capabilities","author":"Lin","year":"2025","journal-title":"arXiv preprint arXiv:2503.04721"},{"key":"ref16","article-title":"Full-duplex-bench v1.5: Evaluating overlap handling for full-duplex speech models","author":"Lin","year":"2025","journal-title":"arXiv preprint arXiv:2507.23159"},{"key":"ref17","article-title":"Emo-superb: An in-depth look at speech emotion recognition","author":"Wu","year":"2024","journal-title":"arXiv preprint arXiv:2402.13018"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/slt61566.2024.10832296"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1080\/02699939208411068"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/0092-6566(77)90037-X"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2444"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3188113"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447805"},{"key":"ref24","article-title":"Cosyvoice: A scalable multilingual zero-shot text-to-speech synthesizer based on supervised semantic tokens","author":"Du","year":"2024","journal-title":"arXiv preprint arXiv:2407.05407"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095751"},{"key":"ref26","article-title":"Dailydialog: A manually labelled multi-turn dialogue dataset","author":"Li","year":"2017","journal-title":"arXiv preprint arXiv:1710.03957"},{"key":"ref27","article-title":"Llama-omni: Seamless speech interaction with large language models","author":"Fang","year":"2024","journal-title":"arXiv preprint 
arXiv:2409.06666"},{"key":"ref28","article-title":"Mini-omni: Language models can hear, talk while thinking in streaming","author":"Xie","year":"2024","journal-title":"arXiv preprint arXiv:2408.16725"},{"key":"ref29","volume-title":"Mini-omni2: Towards open-source gpt-4o with vision, speech and duplex capabilities","year":"2024"},{"key":"ref30","article-title":"Freeze-omni: A smart and low latency speech-to-speech dialogue model with frozen llm","author":"Wang","year":"2024","journal-title":"arXiv preprint arXiv:2411.00774"},{"key":"ref31","article-title":"Glm-4-voice: Towards intelligent and human-like end-to-end spoken chatbot","author":"Zeng","year":"2024","journal-title":"arXiv preprint arXiv:2412.02612"},{"key":"ref32","article-title":"Moshi: a speech-text foundation model for real-time dialogue","author":"D\u00e9fossez","year":"2024","journal-title":"arXiv preprint arXiv:2410.00037"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00545"},{"key":"ref34","article-title":"Funaudiollm: Voice understanding and generation foundation models for natural interaction between humans and llms","author":"An","year":"2024","journal-title":"arXiv preprint arXiv:2407.04051"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389771"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.eacl-long.32"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.52202\/079017-3230"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2025.3579972"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-1855"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2025-2658"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2025-2446"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2025-2496"},{"key":"ref43","volume-title":"Time and tokens: Benchmarking end-to-end speech dysfluency 
detection","author":"Zhou","year":"2024"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/SLT61566.2024.10832222"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434762.pdf?arnumber=11434762","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:59:27Z","timestamp":1775192367000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434762\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":44,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434762","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}