{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T07:04:04Z","timestamp":1775199844852,"version":"3.50.1"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434669","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-4","source":"Crossref","is-referenced-by-count":0,"title":["Open Full-duplex Voice Agent with Speech-to-Speech Language Model"],"prefix":"10.1109","author":[{"given":"Edresson","family":"Casanova","sequence":"first","affiliation":[{"name":"NVIDIA"}]},{"given":"Chen","family":"Chen","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Kevin","family":"Hu","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Ankita","family":"Pasad","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Elena","family":"Rastorgueva","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Seelan Lakshmi","family":"Narasimhan","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Slyne","family":"Deng","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Ehsan","family":"Hosseini-Asl","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Piotr","family":"Zelasko","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Valentin","family":"Mendelev","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Subhankar","family":"Ghosh","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Yifan","family":"Peng","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Zhehuai","family":"Chen","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Jason","family":"Li","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Jagadeesh","family":"Balam","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Vitaly","family":"Lavrukhin","sequence":"additional","affiliation":[{"name":"NVIDIA"}]},{"given":"Boris","family":"Ginsburg","sequence":"additional","affiliation":[{"name":"NVIDIA"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Language models are few-shot learners","author":"Brown","year":"2020"},{"key":"ref2","article-title":"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context","author":"Team","year":"2024","journal-title":"arXiv preprint arXiv:2403.05530"},{"key":"ref3","article-title":"Gpt-4 technical report","volume-title":"arXiv preprint arXiv:2303.08774","author":"Achiam","year":"2023"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447605"},{"key":"ref5","article-title":"Qwen-audio: Advancing universal audio understanding via unified largescale audio-language models","author":"Chu","year":"2023","journal-title":"arXiv preprint arXiv:2311.07919"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447553"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i21.30570"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.1055"},{"key":"ref9","article-title":"Unified speech-text pretraining for spoken dialog modeling","author":"Kim","year":"2024","journal-title":"arXiv preprint arXiv:2402.05706"},{"key":"ref10","article-title":"Glm-4-voice: Towards intelligent and human-like end-to-end spoken chatbot","author":"Zeng","year":"2024","journal-title":"arXiv preprint arXiv:2412.02612"},{"key":"ref11","article-title":"Moshi: a speech-text foundation model for real-time dialogue","author":"D\u00e9fossez","year":"2024","journal-title":"arXiv preprint arXiv:2410.00037"},{"key":"ref12","article-title":"Salmonn-omni: A codec-free llm for full-duplex speech understanding and generation","author":"Yu","year":"2024","journal-title":"arXiv preprint arXiv:2411.18138"},{"key":"ref13","article-title":"Minmo: A multimodal large language model for seamless voice interaction","author":"Chen","year":"2025","journal-title":"arXiv preprint arXiv:2501.06282"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.709"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2025-874"},{"key":"ref16","article-title":"Nanocodec: Towards high-quality ultra fast speech 11 m inference","year":"2025","journal-title":"Anonymous"},{"key":"ref17","article-title":"STT En FastConformer Hybrid Transducer-CTC Large Streaming 80 ms","volume-title":"NVIDIA"},{"key":"ref18","article-title":"Tinyllama: An open-source small language model","author":"Zhang","year":"2024"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-1575"},{"key":"ref20","first-page":"69","article-title":"The fisher corpus: A resource for the next generations of speech-to-text","volume":"4","author":"Cieri","year":"2004","journal-title":"LREC"},{"key":"ref21","article-title":"Triton inference server","journal-title":"NVIDIA"},{"key":"ref22","article-title":"Getting started with cuda graphs"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.52202\/068431-1189"},{"key":"ref24","article-title":"Tensorrt-llm","journal-title":"NVIDIA"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-439"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00545"},{"key":"ref27","article-title":"Voicebench: Benchmarking llm-based voice assistants","author":"Chen","year":"2024","journal-title":"arXiv preprint arXiv:2410.17196"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434669.pdf?arnumber=11434669","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:58:00Z","timestamp":1775192280000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434669\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434669","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}