{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,5]],"date-time":"2025-11-05T06:53:30Z","timestamp":1762325610781,"version":"3.37.3"},"reference-count":72,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Center for Perceptual and Interactive Intelligence"},{"name":"Innovation and Technology Commission's InnoHK Scheme"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. Process."],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/taslp.2023.3272470","type":"journal-article","created":{"date-parts":[[2023,5,2]],"date-time":"2023-05-02T22:08:30Z","timestamp":1683065310000},"page":"1811-1824","source":"Crossref","is-referenced-by-count":11,"title":["MSMC-TTS: Multi-Stage Multi-Codebook VQ-VAE Based Neural TTS"],"prefix":"10.1109","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3393-9984","authenticated-orcid":false,"given":"Haohan","family":"Guo","sequence":"first","affiliation":[{"name":"Human-Computer Communications Laboratory (HCCL), Department of Systems Engineering and Engineering Management, The Chinese University of Hong Kong, Hong Kong SAR, China"}]},{"given":"Fenglong","family":"Xie","sequence":"additional","affiliation":[{"name":"Xiaohongshu, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9543-1572","authenticated-orcid":false,"given":"Xixin","family":"Wu","sequence":"additional","affiliation":[{"name":"Stanley Ho Big Data Decision Analytics Research Centre, The Chinese University of Hong Kong, Hong Kong SAR, China"}]},{"given":"Frank K.","family":"Soong","sequence":"additional","affiliation":[{"name":"Microsoft, Beijing, China"}]},{"given":"Helen","family":"Meng","sequence":"additional","affiliation":[{"name":"Human-Computer Communications Laboratory (HCCL), Department of Systems Engineering and Engineering Management, The Chinese University of Hong Kong, Hong Kong SAR, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383460"},{"key":"ref2","first-page":"16582","article-title":"Neural dubber: Dubbing for videos according to scripts","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Hu","year":"2021"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747427"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054596"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1951"},{"article-title":"A survey on neural speech synthesis","year":"2021","author":"Tan","key":"ref6"},{"key":"ref7","first-page":"2787","article-title":"Instantaneous pitch estimation based on RAPT framework","volume-title":"Proc. IEEE 20th Eur. Signal Process. Conf.","author":"Azarov","year":"2012"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.1984.1172448"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/ICSLP.1994-275"},{"key":"ref10","first-page":"125","article-title":"WaveNet: A generative model for raw audio","volume-title":"Proc. Speech Synth. Workshop","author":"Oord","year":"2016"},{"key":"ref11","first-page":"2410","article-title":"Efficient neural audio synthesis","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kalchbrenner","year":"2018"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683143"},{"key":"ref13","first-page":"14910","article-title":"MelGAN: Generative adversarial networks for conditional waveform synthesis","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kumar","year":"2019"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-584"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2017.2761547"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1424"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2786"},{"key":"ref18","first-page":"71","article-title":"Universal MelGAN: A robust neural vocoder for high-fidelity waveform generation in multiple domains","volume-title":"Proc. Int. Symp. Chin. Spoken Lang. Process.","author":"Jang","year":"2020"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414444"},{"key":"ref20","article-title":"Acoustic modeling in statistical parametric speech synthesis-from HMM to LSTM-RNN","volume-title":"Proc. Mach. Learn. Speech Lang. Process.","author":"Zen","year":"2015"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2014-443"},{"key":"ref22","first-page":"3165","article-title":"FastSpeech: Fast, robust and controllable text to speech","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Ren","year":"2019"},{"key":"ref23","article-title":"FastSpeech 2: Fast and high-quality end-to-end text to speech","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ren","year":"2021"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016706"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2176"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-971"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-52"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054484"},{"key":"ref29","first-page":"8067","article-title":"Glow-TTS: A generative flow for text-to-speech via monotonic alignment search","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kim","year":"2020"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"article-title":"Adapting TTS models for new speakers using transfer learning","year":"2021","author":"Neekhara","key":"ref31"},{"key":"ref32","first-page":"195","article-title":"Deep voice: Real-time neural text-to-speech","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ark","year":"2017"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413851"},{"key":"ref34","first-page":"5530","article-title":"Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kim","year":"2021"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414408"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/11494669_93"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2015.11.015"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICMLA.2009.48"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/WSS.2002.1224367"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2938863"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2013.50"},{"article-title":"Recent advances in autoencoder-based representation learning","year":"2018","author":"Tschannen","key":"ref42"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2015.08.104"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1016\/j.jfranklin.2017.08.014"},{"key":"ref45","first-page":"6306","article-title":"Neural discrete representation learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Oord","year":"2017"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683277"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1145\/3461615.3491114"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746698"},{"key":"ref49","first-page":"14866","article-title":"Generating diverse high-fidelity images with VQ-VAE-2","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Razavi","year":"2019"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2010.57"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref52","article-title":"Hierarchical generative modeling for controllable speech synthesis","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hsu","year":"2019"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053520"},{"key":"ref54","doi-asserted-by":"crossref","DOI":"10.21437\/Blizzard.2021-14","article-title":"DelightfulTTS: The Microsoft speech synthesis system for Blizzard challenge 2021","author":"Liu","year":"2021"},{"key":"ref55","article-title":"Very deep VAEs generalize autoregressive models and can outperform them on images","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Child","year":"2021"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01261-8_28"},{"key":"ref57","first-page":"1","article-title":"The blizzard challenge","volume-title":"Proc. Blizzard Challenge Workshop","author":"King","year":"2011"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1972"},{"key":"ref59","article-title":"Adam: A method for stochastic optimization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kingma","year":"2015"},{"key":"ref60","first-page":"17022","article-title":"HiFi-GAN: Generative adversarial networks for efficient and high fidelity speech synthesis","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kong","year":"2020"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1016"},{"key":"ref62","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Loshchilov","year":"2018"},{"key":"ref63","first-page":"8067","article-title":"Glow-TTS: A generative flow for text-to-speech via monotonic alignment search","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Kim","year":"2020"},{"article-title":"Discretalk: Text-to-speech as a machine translation problem","year":"2020","author":"Hayashi","key":"ref64"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414499"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-489"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688154"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.507"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2001.941023"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/PACRIM.1993.407206"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2010.5495561"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6639215"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6570655\/9970249\/10114504.pdf?arnumber=10114504","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,19]],"date-time":"2024-10-19T17:25:52Z","timestamp":1729358752000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10114504\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":72,"URL":"https:\/\/doi.org\/10.1109\/taslp.2023.3272470","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"type":"print","value":"2329-9290"},{"type":"electronic","value":"2329-9304"}],"subject":[],"published":{"date-parts":[[2023]]}}}