{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,13]],"date-time":"2025-06-13T07:29:02Z","timestamp":1749799742474,"version":"3.37.3"},"reference-count":26,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T00:00:00Z","timestamp":1673222400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T00:00:00Z","timestamp":1673222400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100012950","name":"Inria","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100012950","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100012681","name":"CNRS","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100012681","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,1,9]]},"DOI":"10.1109\/slt54892.2023.10022766","type":"proceedings-article","created":{"date-parts":[[2023,1,27]],"date-time":"2023-01-27T18:54:03Z","timestamp":1674845643000},"page":"900-905","source":"Crossref","is-referenced-by-count":3,"title":["Can We Use Common Voice to Train a Multi-Speaker TTS System?"],"prefix":"10.1109","author":[{"given":"Sewade","family":"Ogun","sequence":"first","affiliation":[{"name":"Universit&#x00E9; de Lorraine, CNRS,Nancy,France,F-54000"}]},{"given":"Vincent","family":"Colotte","sequence":"additional","affiliation":[{"name":"Universit&#x00E9; de Lorraine, CNRS,Nancy,France,F-54000"}]},{"given":"Emmanuel","family":"Vincent","sequence":"additional","affiliation":[{"name":"Universit&#x00E9; de Lorraine, CNRS,Nancy,France,F-54000"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/icassp40776.2020.9053520"},{"key":"ref2","first-page":"6189","article-title":"Mel-lotron: Multispeaker expressive voice synthesis by con-ditioning on rhythm, pitch and global style tokens","author":"Valle","year":"2020","journal-title":"ICASSP"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747345"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1774"},{"key":"ref5","first-page":"7682","article-title":"SYNT ++: Utilizing imperfect synthetic data to im-prove speech recognition","author":"Hu","year":"2022","journal-title":"ICASSP"},{"volume-title":"CSTR VCTK Corpus: English multi-speaker corpus for CSTR voice cloning toolkit (version 0.92)","year":"2019","author":"Yamagishi","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"},{"key":"ref8","first-page":"4211","article-title":"Common Voice: A massively-multilingual speech corpus","author":"Ardila","year":"2020","journal-title":"LREC"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/icassp.2001.941023"},{"key":"ref10","article-title":"HiFi++: A unified framework for neural vocoding, bandwidth ex-tension and speech 
enhancement","author":"Andreev","year":"2022","journal-title":"arXiv"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/SSW.2016-30"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639642"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-465"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413934"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/icassp.2019.8683561"},{"key":"ref17","article-title":"Style Equalization: Unsupervised learning of control-lable generative sequence models","author":"Chang","year":"2021","journal-title":"arXiv"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-2003"},{"article-title":"Auto-MOS: Learning a non-intrusive assessor of naturalness-of-speech","volume-title":"NIPS workshop","author":"Patton","key":"ref19"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746395"},{"key":"ref21","first-page":"7414","article-title":"Unsuper-vised pretraining transfers well across languages","author":"Riviere","year":"2020","journal-title":"ICASSP"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1673"},{"key":"ref23","first-page":"8067","article-title":"Glow-TTS: A generative flow for text-to-speech via monotonic alignment search","volume":"33","author":"Kim","year":"2020","journal-title":"NIPS"},{"key":"ref24","first-page":"17022","article-title":"HiFi-GAN: Generative ad-versarial networks for efficient and high fidelity speech synthesis","volume":"33","author":"Kong","year":"2020","journal-title":"NIPS"},{"key":"ref25","article-title":"NeMo: A toolkit for building AI applications using neural modules","author":"Kuchaiev","year":"2019","journal-title":"arXiv"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.3109\/02699206.2011.578783"}],"event":{"name":"2022 IEEE Spoken Language Technology Workshop (SLT)","start":{"date-parts":[[2023,1,9]]},"location":"Doha, Qatar","end":{"date-parts":[[2023,1,12]]}},"container-title":["2022 IEEE Spoken Language Technology Workshop (SLT)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10022052\/10022330\/10022766.pdf?arnumber=10022766","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,13]],"date-time":"2024-02-13T05:59:26Z","timestamp":1707803966000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10022766\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,1,9]]},"references-count":26,"URL":"https:\/\/doi.org\/10.1109\/slt54892.2023.10022766","relation":{},"subject":[],"published":{"date-parts":[[2023,1,9]]}}}