{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,5,28]],"date-time":"2025-05-28T05:14:17Z","timestamp":1748409257679,"version":"3.28.0"},"reference-count":34,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10446098","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"11166-11170","source":"Crossref","is-referenced-by-count":3,"title":["Latent Filling: Latent Space Data Augmentation for Zero-Shot Speech Synthesis"],"prefix":"10.1109","author":[{"given":"Jae-Sung","family":"Bae","sequence":"first","affiliation":[{"name":"Samsung Research,Seoul,Republic of Korea"}]},{"given":"Joun Yeop","family":"Lee","sequence":"additional","affiliation":[{"name":"Samsung Research,Seoul,Republic of Korea"}]},{"given":"Ji-Hyun","family":"Lee","sequence":"additional","affiliation":[{"name":"Samsung Research,Seoul,Republic of Korea"}]},{"given":"Seongkyu","family":"Mun","sequence":"additional","affiliation":[{"name":"Samsung Research,Seoul,Republic of Korea"}]},{"given":"Taehwa","family":"Kang","sequence":"additional","affiliation":[{"name":"Samsung Research,Seoul,Republic of Korea"}]},{"given":"Hoon-Young","family":"Cho","sequence":"additional","affiliation":[{"name":"Samsung Research,Seoul,Republic of Korea"}]},{"given":"Chanwoo","family":"Kim","sequence":"additional","affiliation":[{"name":"Korea University,Seoul,Republic of Korea"}]}],
"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"article-title":"Fast-Speech 2: Fast and high-quality end-to-end text to speech","volume-title":"Proc. Int. Conf. on Learning Representations (ICLR)","author":"Ren","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2464"},{"article-title":"Sample efficient adaptive text-to-speech","volume-title":"Proc. Int. Conf. on Learning Representations (ICLR)","author":"Chen","key":"ref4"},{"article-title":"AdaSpeech: Adaptive text to speech for custom voice","volume-title":"Proc. Int. Conf. on Learning Representations (ICLR)","author":"Chen","key":"ref5"},{"article-title":"YourTTS: Towards zero-shot multi-speaker TTS and zero-shot voice conversion for everyone","volume-title":"Proc. Int. Conf. on Machine Learning (ICML)","author":"Casanova","key":"ref6"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-225"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1774"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-901"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747388"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2022.3226655"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1128"},{"key":"ref13","article-title":"Neural codec language models are zero-shot text to speech synthesizers","volume":"abs\/2301.02111","author":"Wang","year":"2023","journal-title":"CoRR"},
{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00618"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11278"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413466"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10134"},{"article-title":"Dataset augmentation in feature space","volume-title":"Proc. Int. Conf. on Learning Representations (ICLR)","author":"DeVries","key":"ref18"},{"article-title":"mixup: Beyond empirical risk minimization","volume-title":"Proc. Int. Conf. on Learning Representations (ICLR)","author":"Zhang","key":"ref19"},{"article-title":"MODALS: Modality-agnostic automated data augmentation in the latent space","volume-title":"Proc. Int. Conf. on Learning Representations (ICLR)","author":"Cheung","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548365"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747707"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-897"},{"key":"ref24","article-title":"CSTR VCTK Corpus: English multi-speaker corpus for CSTR voice cloning toolkit (version 0.92)","author":"Yamagishi","year":"2019","journal-title":"University of Edinburgh. The Centre for Speech Technology Research (CSTR)"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"},{"article-title":"The LJ Speech dataset","year":"2017","author":"Ito","key":"ref26"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-310"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2650"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-143"},{"article-title":"Voicebox: Text-guided multilingual universal speech generation at scale","volume-title":"Proc. Advances in Neural Information Processing Systems","author":"Le","key":"ref30"},
{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1406.3269"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052942"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1006\/jsvi.1998.2072"},{"article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"Proc. Int. Conf. on Machine Learning (ICML)","author":"Radford","key":"ref34"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2024,4,14]]},"location":"Seoul, Korea, Republic of","end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10446098.pdf?arnumber=10446098","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T04:42:14Z","timestamp":1722573734000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10446098\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10446098","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}