{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:14:43Z","timestamp":1776885283925,"version":"3.51.2"},"reference-count":45,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100018693","name":"Horizon Europe","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100018693","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100002701","name":"Ministry of Education","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100002701","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434752","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-8","source":"Crossref","is-referenced-by-count":1,"title":["Streaming Endpointer for Spoken Dialogue using Neural Audio Codecs and Label-Delayed Training"],"prefix":"10.1109","author":[{"given":"Sathvik","family":"Udupa","sequence":"first","affiliation":[{"name":"Brno University of Technology,Czechia"}]},{"given":"Shinji","family":"Watanabe","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University,United States"}]},{"given":"Petr","family":"Schwarz","sequence":"additional","affiliation":[{"name":"Brno University of Technology,Czechia"}]},{"given":"Jan","family":"Cernocky","sequence":"additional","affiliation":[{"name":"Brno University of Technology,Czechia"}]}],"member":"263","reference":[{"key":"ref1","article-title":"WavChat: A Survey of Spoken Dialogue Models","author":"Ji","year":"2024","journal-title":"arXiv preprint arXiv:2411.13577"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3358423"},{"key":"ref3","article-title":"Gemini: a family of highly capable multimodal models","author":"Team","year":"2023","journal-title":"arXiv preprint arXiv:2312.11805"},{"key":"ref4","volume-title":"OpenAI-gpt-4o","year":"2025"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-496"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10955"},{"key":"ref7","article-title":"Talking Turns: Benchmarking Audio Foundation Models on Turn-Taking Dynamics","author":"Arora","year":"2025","journal-title":"ICLR"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2005-458"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/97.736233"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2018.2800728"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461921"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2001.940814"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461478"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096595"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3129994"},{"key":"ref16","article-title":"High Fidelity Neural Audio 
Compression","author":"D\u00e9fossez","year":"2023","journal-title":"Transactions on Machine Learning Research"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096509"},{"key":"ref18","article-title":"Moshi: a speech-text foundation model for real-time dialogue","author":"D\u00e9fossez","year":"2024","journal-title":"arXiv, no. arXiv:2410.00037"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.616"},{"key":"ref20","volume-title":"ESPnet-Codec: Comprehensive Training and Evaluation of Neural Codecs for Audio, Music, and Speech","author":"Shi","year":"2024"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-330"},{"key":"ref22","volume-title":"High-Fidelity Simultaneous Speech-To-Speech Translation","author":"Labiausse","year":"2025"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3207050"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2024.3469530"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2020.3020696"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683109"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-566"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054715"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11216"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389743"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096155"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-2037"},{"key":"ref33","first-page":"11 873","article-title":"Multilingual turn-taking prediction using voice activity projection","volume-title":"Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)","author":"Inoue"},{"key":"ref34","first-page":"7171","article-title":"Yeah, un, oh: Continuous and real-time backchannel prediction with fine-tuning of voice activity projection","volume-title":"Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)","author":"Inoue"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447196"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054358"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.593"},{"key":"ref38","article-title":"Simple and controllable music generation","volume":"36","author":"Copet","year":"2024","journal-title":"NeurIPS"},{"key":"ref39","article-title":"SpokenWOZ: a large-scale speech-text benchmark for spoken task-oriented dialogue agents","author":"Si","year":"2024","journal-title":"NeurIPS"},{"key":"ref40","volume-title":"Silero VAD: pre-trained enterprise-grade Voice Activity Detector (VAD), Number Detector and Language Classifier","author":"Team","year":"2024"},{"key":"ref41","article-title":"PyTorch: an imperative style, high-performance deep learning library","author":"Paszke","year":"2019","journal-title":"NeurIPS"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096775"},{"key":"ref44","article-title":"Llm-enhanced dialogue management for full-duplex spoken dialogue 
systems","author":"Zhang","year":"2025","journal-title":"arXiv preprint arXiv:2502.14145"},{"key":"ref45","article-title":"Minmo: A multimodal large language model for seamless voice interaction","author":"Chen","year":"2025","journal-title":"arXiv preprint arXiv:2501.06282"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434752.pdf?arnumber=11434752","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:59:20Z","timestamp":1775192360000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434752\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":45,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434752","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}