{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T19:38:41Z","timestamp":1757619521723,"version":"3.44.0"},"publisher-location":"Singapore","reference-count":23,"publisher":"Springer Nature Singapore","isbn-type":[{"type":"print","value":"9789819500130"},{"type":"electronic","value":"9789819500147"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-981-95-0014-7_15","type":"book-chapter","created":{"date-parts":[[2025,7,24]],"date-time":"2025-07-24T10:06:25Z","timestamp":1753351585000},"page":"173-184","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["EmbSpeech: A Unified Framework Towards Low-Resource Zero-Shot Speech Synthesis"],"prefix":"10.1007","author":[{"given":"Fan","family":"Huang","sequence":"first","affiliation":[]},{"given":"Dong","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Chengxin","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Zhengxuan","family":"Song","sequence":"additional","affiliation":[]},{"given":"Ge","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Kun","family":"Zeng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,7,25]]},"reference":[{"key":"15_CR1","unstructured":"Casanova, E., Weber, J., Shulby, C.D., et al.: Yourtts: towards zero-shot multi-speaker tts and zero-shot voice conversion for everyone. In: ICML 2022. vol. 162, pp. 2709\u20132720 (2022)"},{"key":"15_CR2","doi-asserted-by":"crossref","unstructured":"Guo, H., Liu, C., Ishi, C.T., et al.: X-e-speech: joint training framework of non-autoregressive cross-lingual emotional text-to-speech and voice conversion. In: Interspeech 2024. pp. 4983\u20134987 (2024)","DOI":"10.21437\/Interspeech.2024-589"},{"key":"15_CR3","first-page":"3451","volume":"29","author":"W Hsu","year":"2021","unstructured":"Hsu, W., Bolte, B., Tsai, Y.H., et al.: Hubert: self-supervised speech representation learning by masked prediction of hidden units. TASLP 29, 3451\u20133460 (2021)","journal-title":"TASLP"},{"key":"15_CR4","doi-asserted-by":"crossref","unstructured":"Huang, F., Zeng, K., Zhu, W.: Diffvc+: Improving diffusion-based voice conversion for speaker anonymization. Interspeech 2024. pp. 4453\u20134457 (2024)","DOI":"10.21437\/Interspeech.2024-502"},{"key":"15_CR5","unstructured":"Ito, K., Johnson, L.: The lj speech dataset (2017). https:\/\/keithito.com\/LJ-Speech-Dataset\/"},{"key":"15_CR6","unstructured":"Jia, Y., Zhang, Y., Weiss, R.J., et al.: Transfer learning from speaker verification to multispeaker text-to-speech synthesis. NeurIPS 2018. 31, 485\u20134495 (2018)"},{"key":"15_CR7","doi-asserted-by":"crossref","unstructured":"Kharitonov, E., Copet, J., Lakhotia, K., et al.: textless-lib: a library for textless spoken language processing. 
CoRR abs\/2202.07359 (2022)","DOI":"10.18653\/v1\/2022.naacl-demo.1"},{"key":"15_CR8","doi-asserted-by":"crossref","unstructured":"Kim, H., Kim, S., Yeom, J., et al.: Unitspeech: speaker-adaptive speech synthesis with untranscribed data. Interspeech 2023. pp. 3038\u20133042 (2023)","DOI":"10.21437\/Interspeech.2023-2326"},{"issue":"33","key":"15_CR9","first-page":"17022","volume":"2020","author":"J Kong","year":"2020","unstructured":"Kong, J., Kim, J., Bae, J.: Hifi-gan: Generative adversarial networks for efficient and high fidelity speech synthesis. NeurIPS 2020(33), 17022\u201317033 (2020)","journal-title":"NeurIPS"},{"key":"15_CR10","doi-asserted-by":"crossref","unstructured":"Kreuk, F., Polyak, A., Copet, J., et al.: Textless speech emotion conversion using discrete & decomposed representations. In: EMNLP 2022. pp. 11200\u201311214 (2022)","DOI":"10.18653\/v1\/2022.emnlp-main.769"},{"key":"15_CR11","doi-asserted-by":"crossref","unstructured":"Maimon, G., Adi, Y.: Speaking style conversion in the waveform domain using discrete self-supervised units. In: EMNLP 2023. pp. 8048\u20138061 (2023)","DOI":"10.18653\/v1\/2023.findings-emnlp.541"},{"key":"15_CR12","unstructured":"Popov, V., Vovk, I., Gogoryan, V., et al.: Grad-tts: A diffusion probabilistic model for text-to-speech. ICML 2021. 139, 8599\u20138608 (2021)"},{"key":"15_CR13","unstructured":"Popov, V., Vovk, I., Gogoryan, V., et al.: Diffusion-based voice conversion with fast maximum likelihood sampling scheme. ICLR 2022 (2022)"},{"key":"15_CR14","unstructured":"Qian, K., Zhang, Y., Gao, H., et al.: Contentvec: an improved self-supervised speech representation by disentangling speakers. ICML 2022. 162, 18003\u201318017 (2022)"},{"key":"15_CR15","unstructured":"Radford, A., Kim, J.W., Xu, T., et al.: Robust speech recognition via large-scale weak supervision. ICML 2023. 202, 28492\u201328518 (2023)"},{"key":"15_CR16","doi-asserted-by":"crossref","unstructured":"Saeki, T., Xin, D., Nakata, W., et al.: Utmos: Utokyo-sarulab system for voicemos challenge 2022. Interspeech 2022. pp. 4521\u20134525 (2022)","DOI":"10.21437\/Interspeech.2022-439"},{"key":"15_CR17","doi-asserted-by":"crossref","unstructured":"Shi, Y., Bu, H., Xu, X., et al.: Aishell-3: a multi-speaker mandarin tts corpus. Interspeech 2021. pp. 2756\u20132760 (2021)","DOI":"10.21437\/Interspeech.2021-755"},{"key":"15_CR18","doi-asserted-by":"crossref","unstructured":"Van Niekerk, B., Carbonneau, M., Za\u00efdi, J., et al.: A comparison of discrete and soft speech units for improved voice conversion. ICASSP 2022. pp. 6562\u20136566 (2022)","DOI":"10.1109\/ICASSP43922.2022.9746484"},{"key":"15_CR19","unstructured":"Veaux, C., Yamagishi, J., MacDonald, K., et al.: Superseded-cstr vctk corpus: english multi-speaker corpus for cstr voice cloning toolkit (2016)"},{"key":"15_CR20","doi-asserted-by":"crossref","unstructured":"Yan, Y., Tan, X., Li, B., et al.: Adaspeech 2: adaptive text to speech with untranscribed data. ICASSP 2021. pp. 6613\u20136617 (2021)","DOI":"10.1109\/ICASSP39728.2021.9414872"},{"key":"15_CR21","doi-asserted-by":"crossref","unstructured":"Zen, H., Dang, V., Clark, R., et al.: Libritts: a corpus derived from librispeech for text-to-speech. Interspeech 2019. pp. 1526\u20131530 (2019)","DOI":"10.21437\/Interspeech.2019-2441"},{"key":"15_CR22","doi-asserted-by":"crossref","unstructured":"Zhang, M., Wang, X., Fang, F., et al.: Joint training framework for text-to-speech and voice conversion using multi-source tacotron and wavenet. Interspeech 2019. pp. 
1298\u20131302 (2019)","DOI":"10.21437\/Interspeech.2019-1357"},{"key":"15_CR23","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1016\/j.specom.2021.11.006","volume":"137","author":"K Zhou","year":"2022","unstructured":"Zhou, K., Sisman, B., Liu, R., et al.: Emotional voice conversion: theory, databases and esd. Speech Commun. 137, 1\u201318 (2022)","journal-title":"Speech Commun."}],"container-title":["Lecture Notes in Computer Science","Advanced Intelligent Computing Technology and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-981-95-0014-7_15","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,7]],"date-time":"2025-09-07T21:34:59Z","timestamp":1757280899000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-981-95-0014-7_15"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9789819500130","9789819500147"],"references-count":23,"URL":"https:\/\/doi.org\/10.1007\/978-981-95-0014-7_15","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"25 July 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICIC","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Intelligent Computing","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Ningbo","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"China","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 July 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 July 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"21","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icic2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"http:\/\/www.ic-icc.cn\/icg\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}