{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:24:59Z","timestamp":1775229899868,"version":"3.50.1"},"reference-count":24,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9746107","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"7482-7486","source":"Crossref","is-referenced-by-count":6,"title":["Mixer-TTS: Non-Autoregressive, Fast and Compact Text-to-Speech Model Conditioned on Language Model Embeddings"],"prefix":"10.1109","author":[{"given":"Oktai","family":"Tatanov","sequence":"first","affiliation":[{"name":"NVIDIA,Santa Clara"}]},{"given":"Stanislav","family":"Beliaev","sequence":"additional","affiliation":[{"name":"NVIDIA,Santa Clara"}]},{"given":"Boris","family":"Ginsburg","sequence":"additional","affiliation":[{"name":"NVIDIA,Santa Clara"}]}],"member":"263","reference":[{"key":"ref10","article-title":"Glow-TTS: A generative flow for text-to-speech via monotonic alignment search","author":"kim","year":"2020","journal-title":"NeurIPS"},{"key":"ref11","article-title":"RAD-TTS: Parallel flow-based TTS with robust alignment learning and diverse synthesis","author":"shih","year":"2021","journal-title":"ICML Workshop on Invertible Neural Networks Normalizing Flows and Explicit Likelihood Models"},{"key":"ref12","article-title":"One TTS alignment to rule them 
all","author":"badlani","year":"2021"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-3177"},{"key":"ref14","article-title":"BERT: Pretraining of deep bidirectional transformers for language understanding","author":"devlin","year":"2019","journal-title":"NAACL"},{"key":"ref15","article-title":"Improving prosody modelling with cross-utterance bert embeddings for end-to-end speech synthesis","author":"xu","year":"2021","journal-title":"ICASSP"},{"key":"ref16","article-title":"MLP-Mixer: An all-MLP architecture for vision","author":"tolstikhin","year":"2021"},{"key":"ref17","article-title":"HiFi-GAN: Generative adversarial networks for efficient and high fidelity speech synthesis","author":"kong","year":"2020"},{"key":"ref18","article-title":"The LJ speech dataset","author":"ito","year":"2017"},{"key":"ref19","article-title":"Albert: A lite bert for self-supervised learning of language representations","author":"lan","year":"2020"},{"key":"ref4","article-title":"FastSpeech: Fast, robust and controllable text to speech","author":"ren","year":"2019","journal-title":"NeurIPS"},{"key":"ref3","article-title":"Deep Voice 3: 2000-speaker neural text-to-speech","author":"ping","year":"2018","journal-title":"ICLR"},{"key":"ref6","article-title":"Fastpitch: Parallel text-to-speech with pitch prediction","author":"łańcucki","year":"2021","journal-title":"ICASSP"},{"key":"ref5","article-title":"FastSpeech 2: Fast and high-quality end-to-end text to speech","author":"ren","year":"2020"},{"key":"ref8","article-title":"Deep Voice: Real-time neural text-to-speech","author":"arik","year":"2017"},{"key":"ref7","article-title":"Talknet: Nonautoregressive depth-wise separable convolutional model for speech synthesis with explicit pitch and duration 
prediction","author":"beliaev","year":"2021","journal-title":"InterSpeech"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref1","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2017-1452","article-title":"Tacotron: Towards end-to-end speech synthesis","author":"wang","year":"2017","journal-title":"InterSpeech"},{"key":"ref9","article-title":"Deep Voice 2: Multispeaker neural text-to-speech","author":"gibiansky","year":"2017","journal-title":"NIPS"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-demos.6"},{"key":"ref22","article-title":"Large batch optimization for deep learning: Training bert in 76 minutes","author":"you","year":"2019"},{"key":"ref21","doi-asserted-by":"crossref","DOI":"10.25080\/Majora-7b98e3ed-003","article-title":"Librosa: Audio and music signal analysis in Python","author":"mcfee","year":"2015","journal-title":"the 14th Python in Science Conference"},{"key":"ref24","article-title":"Nemo: a toolkit for building ai applications using neural modules","author":"kuchaiev","year":"2019"},{"key":"ref23","article-title":"Mixed precision training","author":"micikevicius","year":"2017"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Singapore, Singapore","start":{"date-parts":[[2022,5,23]]},"end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing 
(ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09746107.pdf?arnumber=9746107","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,22]],"date-time":"2022-08-22T20:11:22Z","timestamp":1661199082000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9746107\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":24,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9746107","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}