{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T18:43:43Z","timestamp":1776883423420,"version":"3.51.2"},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10447815","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"12662-12666","source":"Crossref","is-referenced-by-count":2,"title":["Improving Language Model-Based Zero-Shot Text-to-Speech Synthesis with Multi-Scale Acoustic Prompts"],"prefix":"10.1109","author":[{"given":"Shun","family":"Lei","sequence":"first","affiliation":[{"name":"Tsinghua University,Shenzhen International Graduate School,Shenzhen"}]},{"given":"Yixuan","family":"Zhou","sequence":"additional","affiliation":[{"name":"Tsinghua University,Shenzhen International Graduate School,Shenzhen"}]},{"given":"Liyang","family":"Chen","sequence":"additional","affiliation":[{"name":"Tsinghua University,Shenzhen International Graduate School,Shenzhen"}]},{"given":"Dan","family":"Luo","sequence":"additional","affiliation":[{"name":"Tsinghua University,Shenzhen International Graduate School,Shenzhen"}]},{"given":"Zhiyong","family":"Wu","sequence":"additional","affiliation":[{"name":"Tsinghua University,Shenzhen International Graduate School,Shenzhen"}]},{"given":"Xixin","family":"Wu","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong"}]},{"given":"Shiyin","family":"Kang","sequence":"additional","affiliation":[{"name":"Skywork AI PTE. LTD.,Beijing"}]},{"given":"Tao","family":"Jiang","sequence":"additional","affiliation":[{"name":"Skywork AI PTE. LTD.,Beijing"}]},{"given":"Yahui","family":"Zhou","sequence":"additional","affiliation":[{"name":"Skywork AI PTE. LTD.,Beijing"}]},{"given":"Yuxing","family":"Han","sequence":"additional","affiliation":[{"name":"Tsinghua University,Shenzhen International Graduate School,Shenzhen"}]},{"given":"Helen","family":"Meng","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461368"},{"key":"ref2","article-title":"Fastspeech 2: Fast and high-quality end-to-end text to speech","volume-title":"International Conference on Learning Representations","author":"Ren"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3139"},{"key":"ref4","first-page":"5530","article-title":"Conditional variational autoencoder with adversarial learning for end-toend text-to-speech","volume-title":"International Conference on Machine Learning","author":"Kim"},{"key":"ref5","article-title":"A survey on neural speech synthesis","author":"Tan","year":"2021"},{"key":"ref6","article-title":"Sample efficient adaptive text-to-speech","volume-title":"International Conference on Learning Representations","author":"Chen"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1705"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054301"},{"key":"ref9","article-title":"Adaspeech: Adaptive text to speech for custom voice","volume-title":"International Conference on Learning Representations","author":"Chen"},{"key":"ref10","article-title":"Transfer learning from speaker verification to multispeaker text-to-speech synthesis","volume":"31","author":"Jia","year":"2018","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054535"},{"key":"ref12","first-page":"2709","article-title":"YourTTS: Towards zero-shot multi-speaker tts and zero-shot voice conversion for everyone","volume-title":"International Conference on Machine Learning","author":"Casanova"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2096"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10054"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3288409"},{"key":"ref16","article-title":"Neural codec language models are zero-shot text to speech synthesizers","author":"Wang","year":"2023"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00618"},{"key":"ref18","article-title":"High fidelity neural audio compression","author":"D\u00e9fossez","year":"2022"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3129994"},{"key":"ref20","first-page":"1877","article-title":"Language models are few-shot learners","volume":"33","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3301217"},{"key":"ref22","article-title":"Mega-TTS: Zero-shot text-to-speech at scale with intrinsic inductive bias","author":"Jiang","year":"2023"},{"key":"ref23","article-title":"NaturalSpeech 2: Latent diffusion models are natural and zero-shot speech and singing synthesizers","author":"Shen","year":"2023"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-947"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Seoul, Korea, Republic of","start":{"date-parts":[[2024,4,14]]},"end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10447815.pdf?arnumber=10447815","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T06:13:11Z","timestamp":1722579191000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10447815\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10447815","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}