{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:22:43Z","timestamp":1776885763537,"version":"3.51.2"},"reference-count":55,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11433841","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-8","source":"Crossref","is-referenced-by-count":5,"title":["ZipVoice: Fast and High-Quality Zero-Shot Text-to-Speech with Flow Matching"],"prefix":"10.1109","author":[{"given":"Han","family":"Zhu","sequence":"first","affiliation":[{"name":"Xiaomi Corp,Beijing,China"}]},{"given":"Wei","family":"Kang","sequence":"additional","affiliation":[{"name":"Xiaomi Corp,Beijing,China"}]},{"given":"Zengwei","family":"Yao","sequence":"additional","affiliation":[{"name":"Xiaomi Corp,Beijing,China"}]},{"given":"Liyong","family":"Guo","sequence":"additional","affiliation":[{"name":"Xiaomi Corp,Beijing,China"}]},{"given":"Fangjun","family":"Kuang","sequence":"additional","affiliation":[{"name":"Xiaomi Corp,Beijing,China"}]},{"given":"Zhaoqing","family":"Li","sequence":"additional","affiliation":[{"name":"Xiaomi Corp,Beijing,China"}]},{"given":"Weiji","family":"Zhuang","sequence":"additional","affiliation":[{"name":"Xiaomi Corp,Beijing,China"}]},{"given":"Long","family":"Lin","sequence":"additional","affiliation":[{"name":"Xiaomi Corp,Beijing,China"}]},{"given":"Daniel","family":"Povey","sequence":"additional","affiliation":[{"name":"Xiaomi Corp,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TASLPRO.2025.3530270"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.3362\/0262-8104.2002.009"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/SLT61566.2024.10832320"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.313"},{"key":"ref5","article-title":"MaskGCT: Zero-shot text-to-speech with masked generative codec transformer","volume-title":"The Thirteenth International Conference on Learning Representations","author":"Wang"},{"key":"ref6","article-title":"Fireredtts: A foundation text-to-speech framework for industry-level generative speech applications","author":"Guo","year":"2024","journal-title":"arXiv preprint arXiv:2409.03283"},{"key":"ref7","article-title":"Seed-tts: A family of high-quality versatile speech generation models","author":"Anastassiou","year":"2024","journal-title":"arXiv preprint arXiv:2406.02430"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447120"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/SLT61566.2024.10832365"},{"key":"ref11","article-title":"Naturalspeech 2: Latent diffusion models are natural and zero-shot speech and singing synthesizers","author":"Shen","year":"2024","journal-title":"ICLR"},{"key":"ref12","article-title":"Flow matching for generative modeling","volume-title":"The Eleventh International Conference on Learning Representations","author":"Lipman"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TASLPRO.2025.3557242"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.52202\/075280-3246"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref16","article-title":"Zipformer: A faster and better encoder for automatic speech recognition","volume-title":"The Twelfth International Conference on Learning Representations","author":"Yao"},{"key":"ref17","article-title":"Classifier-free diffusion guidance","volume-title":"NeurIPS 2021 Workshop on Deep Generative Models and Downstream Applications","author":"Ho"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00453"},{"key":"ref19","article-title":"U-dits: Downsample tokens in u-shaped diffusion transformers","volume-title":"The Thirty-eighth Annual Conference on Neural Information Processing Systems","author":"Tian"},{"key":"ref20","article-title":"Fastspeech: Fast, robust and controllable text to speech","volume":"32","author":"Ren","year":"2019","journal-title":"Advances in neural information processing systems"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-3015"},{"key":"ref22","first-page":"8067","article-title":"Glow-tts: A generative flow for text-to-speech via monotonic alignment search","volume-title":"Advances in Neural Information Processing Systems","volume":"33","author":"Kim"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054484"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-1392"},{"key":"ref25","article-title":"DiTTo-TTS: Diffusion transformers for scalable text-to-speech without domain-specific factors","volume-title":"The Thirteenth International Conference on Learning Representations","author":"Lee"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01548"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01374"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1452"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref30","article-title":"Revisiting oversmoothness in text to speech","author":"Ren","year":"2022","journal-title":"arXiv preprint arXiv:2202.13066"},{"key":"ref31","first-page":"8599","article-title":"Gradtts: A diffusion probabilistic model for text-to-speech","volume-title":"International Conference on Machine Learning.","author":"Popov"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448291"},{"key":"ref33","first-page":"32211","article-title":"Consistency models","volume-title":"International Conference on Machine Learning.","author":"Song"},{"key":"ref34","article-title":"Flow straight and fast: Learning to generate and transfer data with rectified flow","volume-title":"The Eleventh International Conference on Learning Representations","author":"Liu"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612061"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447822"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10445948"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681044"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10889258"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414403"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389666"},{"key":"ref42","first-page":"4218","article-title":"Common voice: A massively-multilingual speech corpus","volume-title":"Proceedings of the Twelfth Language Resources and Evaluation Conference","author":"Ardila"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414423"},{"key":"ref44","article-title":"Vocos: Closing the gap between time-domain and fourier-based neural vocoders for high-quality audio synthesis","volume-title":"The Twelfth International Conference on Learning Representations","author":"Siuzdak"},{"key":"ref45","first-page":"28492","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"International conference on machine learning.","author":"Radford"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-9996"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3188113"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2650"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-439"},{"key":"ref51","article-title":"Cosyvoice: A scalable multilingual zero-shot text-to-speech synthesizer based on supervised semantic tokens","author":"Du","year":"2024","journal-title":"arXiv preprint arXiv:2407.05407"},{"key":"ref52","article-title":"Cosyvoice 2: Scalable streaming speech synthesis with large language models","volume-title":"arXiv preprint arXiv:2412.10117","author":"Du","year":"2024"},{"key":"ref53","article-title":"Spark-tts: An efficient llm-based text-to-speech model with single-stream decoupled speech tokens","author":"Wang","year":"2025","journal-title":"arXiv preprint arXiv:2503.01710"},{"key":"ref54","first-page":"2709","article-title":"Yourtts: Towards zero-shot multi-speaker tts and zero-shot voice conversion for everyone","volume-title":"International conference on machine learning.","author":"Casanova"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/SLT61566.2024.10832255"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11433841.pdf?arnumber=11433841","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:55:15Z","timestamp":1775192115000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11433841\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":55,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11433841","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}