{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T07:11:34Z","timestamp":1775200294927,"version":"3.50.1"},"reference-count":30,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434783","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-4","source":"Crossref","is-referenced-by-count":0,"title":["ChipChat: Low-Latency Cascaded Conversational Agent in MLX"],"prefix":"10.1109","author":[{"given":"Tatiana","family":"Likhomanenko","sequence":"first","affiliation":[{"name":"Apple"}]},{"given":"Luke","family":"Carlson","sequence":"additional","affiliation":[{"name":"Apple"}]},{"given":"Richard He","family":"Bai","sequence":"additional","affiliation":[{"name":"Apple"}]},{"given":"Zijin","family":"Gu","sequence":"additional","affiliation":[{"name":"Apple"}]},{"given":"Han","family":"Tran","sequence":"additional","affiliation":[{"name":"Apple"}]},{"given":"Zakaria","family":"Aldeneh","sequence":"additional","affiliation":[{"name":"Apple"}]},{"given":"Yizhe","family":"Zhang","sequence":"additional","affiliation":[{"name":"Apple"}]},{"given":"Ruixiang","family":"Zhang","sequence":"additional","affiliation":[{"name":"Apple"}]},{"given":"Huangjie","family":"Zheng","sequence":"additional","affiliation":[{"name":"Apple"}]},{"given":"Navdeep","family":"Jaitly","sequence":"additional","affiliation":[{"name":"Apple"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Challenges for spoken dialogue systems","volume-title":"Proceedings of the 1999 IEEE ASRU Workshop","volume":"696","author":"Glass"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i21.30570"},{"key":"ref3","article-title":"Funaudiollm: Voice understanding and generation foundation models for natural interaction between humans and llms","author":"An","year":"2024","journal-title":"arXiv preprint arXiv:2407.04051"},{"key":"ref4","first-page":"30","article-title":"Spirit-lm: Interleaved spoken and written language model","volume":"13","author":"Nguyen","year":"2025","journal-title":"Transactions of the Association for Computational Linguistics"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.1055"},{"key":"ref6","article-title":"Salmonn: Towards generic hearing abilities for large language models","volume-title":"The Twelfth International Conference on Learning Representations","author":"Tang"},{"key":"ref7","article-title":"Moshi: a speech-text foundation model for real-time dialogue","author":"D\u00e9fossez","year":"2024","journal-title":"arXiv preprint arXiv:2410.00037"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.912"},{"key":"ref9","article-title":"Mini-omni: Language models can hear, talk while thinking in streaming","author":"Xie","year":"2024","journal-title":"arXiv preprint arXiv:2408.16725"},{"key":"ref10","article-title":"Qwen2.5-omni technical report","volume-title":"arXiv preprint arXiv:2503.20215","author":"Xu","year":"2025"},{"key":"ref11","article-title":"Wavchat: A survey of spoken dialogue models","author":"Ji","year":"2024","journal-title":"arXiv preprint arXiv:2411.13577"},{"key":"ref12","article-title":"Gpt-4o system card","author":"Hurst","year":"2024","journal-title":"arXiv preprint arXiv:2410.21276"},{"key":"ref13","article-title":"Text-to-speech latency benchmark","year":"2024"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.naacl-demo.21"},{"key":"ref15","article-title":"Voicebench: Benchmarking llm-based voice assistants","author":"Chen","year":"2024","journal-title":"arXiv preprint arXiv:2410.17196"},{"key":"ref16","article-title":"Mmau: A massive multitask audio understanding and reasoning benchmark","volume-title":"The Thirteenth International Conference on Learning Representations","author":"Sakshi"},{"key":"ref17","article-title":"Talking turns: Benchmarking audio foundation models on turn-taking dynamics","volume-title":"The Thirteenth International Conference on Learning Representations","author":"Arora"},{"key":"ref18","article-title":"Discrete audio tokens: More than a survey!","author":"Mousavi","year":"2025","journal-title":"arXiv preprint arXiv:2506.10274"},{"key":"ref19","article-title":"MLX: Efficient and flexible machine learning on apple silicon","author":"Hannun","year":"2023"},{"key":"ref20","article-title":"Rabbitmq documentation"},{"key":"ref21","article-title":"Omni-router: Sharing routing decisions in sparse mixture-of-experts for speech recognition","author":"Gu","year":"2025","journal-title":"arXiv preprint arXiv:2507.05724"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-24797-2_7"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10887848"},{"key":"ref24","article-title":"Sage: Steering and refining dialog generation with state-action augmentation","author":"Zhang","year":"2025","journal-title":"arXiv preprint arXiv:2503.03040"},{"key":"ref25","article-title":"Mixtral of experts","author":"Jiang","year":"2024","journal-title":"arXiv preprint arXiv:2401.04088"},{"key":"ref26","article-title":"Mlx lm"},{"key":"ref27","article-title":"Speakstream: Streaming text-to-speech with interleaved data","author":"Bai","year":"2025","journal-title":"arXiv preprint arXiv:2505.19206"},{"key":"ref28","article-title":"dmel: Speech tokenization made simple","author":"Bai","year":"2024","journal-title":"arXiv preprint arXiv:2407.15835"},{"key":"ref29","article-title":"Gradio: Hassle-free sharing and testing of ml models in the wild","author":"Abid","year":"2019","journal-title":"arXiv preprint arXiv:1906.02569"},{"key":"ref30","article-title":"Kyutai unmute","author":"Kyutai","year":"2025"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434783.pdf?arnumber=11434783","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:59:45Z","timestamp":1775192385000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434783\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":30,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434783","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}