{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,25]],"date-time":"2025-03-25T18:24:53Z","timestamp":1742927093801,"version":"3.40.3"},"publisher-location":"Cham","reference-count":28,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031666933"},{"type":"electronic","value":"9783031666940"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-66694-0_5","type":"book-chapter","created":{"date-parts":[[2024,8,22]],"date-time":"2024-08-22T06:09:21Z","timestamp":1724306961000},"page":"72-83","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Towards Natural-Sounding Speech to Text in\u00a0English"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-2730-0544","authenticated-orcid":false,"given":"Kriss","family":"Saulitis","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9813-0548","authenticated-orcid":false,"given":"Evalds","family":"Urtans","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0893-8998","authenticated-orcid":false,"given":"Vairis","family":"Caune","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,8,21]]},"reference":[{"key":"5_CR1","unstructured":"Ar\u0131k, S.\u00d6., et\u00a0al.: Deep voice: 
real-time neural text-to-speech. In: International Conference on Machine Learning, pp. 195\u2013204. PMLR (2017)"},{"key":"5_CR2","unstructured":"Casanova, E., Weber, J., Shulby, C.D., Junior, A.C., G\u00f6lge, E., Ponti, M.A.: Yourtts: towards zero-shot multi-speaker TTS and zero-shot voice conversion for everyone. In: International Conference on Machine Learning, pp. 2709\u20132720. PMLR (2022)"},{"key":"5_CR3","unstructured":"Chen, L.W., Watanabe, S., Rudnicky, A.I.: A vector quantized approach for text to speech synthesis on real-world spontaneous speech. arXiv abs\/2302.04215 (2023). https:\/\/api.semanticscholar.org\/CorpusID:256662411"},{"key":"5_CR4","unstructured":"Chen, N., Zhang, Y., Zen, H., Weiss, R.J., Norouzi, M., Chan, W.: Wavegrad: estimating gradients for waveform generation. arXiv preprint arXiv:2009.00713 (2020). http:\/\/arxiv.org\/abs\/2009.00713"},{"key":"5_CR5","unstructured":"D\u2019Alessandro, N., Sebbe, R., Bozkurt, B., Dutoit, T.: Maxmbrola: a max\/msp mbrola-based tool for real-time voice synthesis. In: 2005 13th European Signal Processing Conference, pp.\u00a01\u20134. IEEE (2005)"},{"key":"5_CR6","unstructured":"Donahue, C., McAuley, J., Puckette, M.: Adversarial audio synthesis. arXiv preprint arXiv:1802.04208 (2018). http:\/\/arxiv.org\/abs\/1802.04208"},{"key":"5_CR7","doi-asserted-by":"crossref","unstructured":"Du, C., Guo, Y., Chen, X., Yu, K.: VQTTS: high-fidelity text-to-speech synthesis with self-supervised VQ acoustic feature. arXiv abs\/2204.00768 (2022). https:\/\/api.semanticscholar.org\/CorpusID:247939783","DOI":"10.21437\/Interspeech.2022-489"},{"key":"5_CR8","unstructured":"Duddington, J.: eSpeak text to speech (1995). https:\/\/espeak.sourceforge.net"},{"key":"5_CR9","first-page":"8067","volume":"33","author":"J Kim","year":"2020","unstructured":"Kim, J., Kim, S., Kong, J., Yoon, S.: Glow-TTS: a generative flow for text-to-speech via monotonic alignment search. Adv. Neural. Inf. Process. Syst. 
33, 8067\u20138077 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"5_CR10","unstructured":"Kim, J., Kong, J., Son, J.: Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech. In: International Conference on Machine Learning, pp. 5530\u20135540. PMLR (2021). https:\/\/api.semanticscholar.org\/CorpusID:235417304"},{"key":"5_CR11","unstructured":"Kim, S., Kim, H., Yoon, S.H.: Guided-TTS 2: a diffusion model for high-quality adaptive text-to-speech with untranscribed data. arXiv abs\/2205.15370 (2022). https:\/\/api.semanticscholar.org\/CorpusID:249209915"},{"key":"5_CR12","doi-asserted-by":"crossref","unstructured":"Kong, J., Park, J., Kim, B., Kim, J., Kong, D., Kim, S.: VITS2: improving quality and efficiency of single-stage text-to-speech with adversarial learning and architecture design. arXiv preprint arXiv:2307.16430 (2023). http:\/\/arxiv.org\/abs\/2307.16430","DOI":"10.21437\/Interspeech.2023-534"},{"key":"5_CR13","doi-asserted-by":"crossref","unstructured":"Li, N., Liu, S., Liu, Y., Zhao, S., Liu, M.: Neural speech synthesis with transformer network. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a033, pp. 6706\u20136713 (2019)","DOI":"10.1609\/aaai.v33i01.33016706"},{"key":"5_CR14","doi-asserted-by":"publisher","unstructured":"Lim, D., Jung, S., Kim, E.: JETS: jointly training fastspeech2 and HIFI-GAN for end to end text to speech, pp. 21\u201325 (2022). https:\/\/doi.org\/10.21437\/Interspeech.2022-10294","DOI":"10.21437\/Interspeech.2022-10294"},{"key":"5_CR15","doi-asserted-by":"crossref","unstructured":"Liu, Y., Xue, R., He, L., Tan, X., Zhao, S.: Delightfultts 2: end-to-end speech synthesis with adversarial vector-quantized auto-encoders. In: Interspeech (2022). 
https:\/\/api.semanticscholar.org\/CorpusID:250425685","DOI":"10.21437\/Interspeech.2022-277"},{"key":"5_CR16","doi-asserted-by":"publisher","DOI":"10.1016\/j.inffus.2023.101869","volume":"99","author":"A Mehrish","year":"2023","unstructured":"Mehrish, A., Majumder, N., Bharadwaj, R., Mihalcea, R., Poria, S.: A review of deep learning techniques for speech processing. Inf. Fusion 99, 101869 (2023)","journal-title":"Inf. Fusion"},{"key":"5_CR17","doi-asserted-by":"crossref","unstructured":"Mehta, S., Kirkland, A., Lameris, H., Beskow, J., Sz\u00e9kely, \u00c9., Henter, G.E.: Overflow: putting flows on top of neural transducers for better TTS. arXiv preprint arXiv:2211.06892 (2022). http:\/\/arxiv.org\/abs\/2211.06892","DOI":"10.21437\/Interspeech.2023-1996"},{"key":"5_CR18","unstructured":"Oord, A.V.D., et al.: Wavenet: a generative model for raw audio. arXiv preprint arXiv:1609.03499 (2016). http:\/\/arxiv.org\/abs\/1609.03499"},{"key":"5_CR19","unstructured":"Popov, V., Vovk, I., Gogoryan, V., Sadekova, T., Kudinov, M.: Grad-TTS: a diffusion probabilistic model for text-to-speech. In: International Conference on Machine Learning, pp. 8599\u20138608. PMLR (2021)"},{"key":"5_CR20","unstructured":"Ren, Y., et al.: Fastspeech 2: fast and high-quality end-to-end text to speech. arXiv abs\/2006.04558 (2020). https:\/\/api.semanticscholar.org\/CorpusID:219531522"},{"key":"5_CR21","doi-asserted-by":"publisher","first-page":"365","DOI":"10.1023\/A:1025708916924","volume":"6","author":"M Schr\u00f6der","year":"2003","unstructured":"Schr\u00f6der, M., Trouvain, J.: The German text-to-speech synthesis system MARY: a tool for research, development and teaching. Int. J. Speech Technol. 6, 365\u2013377 (2003)","journal-title":"Int. J. Speech Technol."},{"key":"5_CR22","doi-asserted-by":"crossref","unstructured":"Shen, J., et\u00a0al.: Natural TTS synthesis by conditioning wavenet on mel spectrogram predictions. 
In: 2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 4779\u20134783. IEEE (2018)","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"5_CR23","unstructured":"Shen, K., et al.: Naturalspeech 2: latent diffusion models are natural and zero-shot speech and singing synthesizers. arXiv abs\/2304.09116 (2023). https:\/\/api.semanticscholar.org\/CorpusID:258187322"},{"key":"5_CR24","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-99-0827-1","volume-title":"Neural Text-to-Speech Synthesis","author":"X Tan","year":"2023","unstructured":"Tan, X.: Neural Text-to-Speech Synthesis. Springer, Singapore (2023). https:\/\/doi.org\/10.1007\/978-981-99-0827-1"},{"key":"5_CR25","unstructured":"Tan, X., et al.: Naturalspeech: end-to-end text to speech synthesis with human-level quality. arXiv abs\/2205.04421 (2022). https:\/\/api.semanticscholar.org\/CorpusID:248572487"},{"key":"5_CR26","unstructured":"Wang, Y., et\u00a0al.: Tacotron: towards end-to-end speech synthesis. arXiv preprint arXiv:1703.10135 (2017). http:\/\/arxiv.org\/abs\/1703.10135"},{"key":"5_CR27","unstructured":"Xue, R., et al.: Foundationtts: text-to-speech for ASR customization with generative language model. arXiv preprint arXiv:2303.02939 (2023). http:\/\/arxiv.org\/abs\/2303.02939"},{"key":"5_CR28","doi-asserted-by":"crossref","unstructured":"Ye, Z., Xue, W., Tan, X., Chen, J., Fei Liu, Q., Guo, Y.T.: Comospeech: one-step speech and singing voice synthesis via consistency model. In: Proceedings of the 31st ACM International Conference on Multimedia (2023). 
https:\/\/api.semanticscholar.org\/CorpusID:258615270","DOI":"10.1145\/3581783.3612061"}],"container-title":["Communications in Computer and Information Science","Deep Learning Theory and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-66694-0_5","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,22]],"date-time":"2024-08-22T06:10:24Z","timestamp":1724307024000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-66694-0_5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031666933","9783031666940"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-66694-0_5","relation":{},"ISSN":["1865-0929","1865-0937"],"issn-type":[{"type":"print","value":"1865-0929"},{"type":"electronic","value":"1865-0937"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"21 August 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"DeLTA","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Deep Learning Theory and Applications","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Dijon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"France","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"10 July 
2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 July 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"5","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"delta2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/delta.scitevents.org\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}