{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,25]],"date-time":"2026-04-25T14:54:31Z","timestamp":1777128871534,"version":"3.51.4"},"reference-count":36,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,12,2]]},"DOI":"10.1109\/slt61566.2024.10832255","type":"proceedings-article","created":{"date-parts":[[2025,1,16]],"date-time":"2025-01-16T18:31:27Z","timestamp":1737052287000},"page":"879-884","source":"Crossref","is-referenced-by-count":22,"title":["Amphion: an Open-Source Audio, Music, and Speech Generation Toolkit"],"prefix":"10.1109","author":[{"given":"Xueyao","family":"Zhang","sequence":"first","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Liumeng","family":"Xue","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Yicheng","family":"Gu","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Yuancheng","family":"Wang","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Jiaqi","family":"Li","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Haorui","family":"He","sequence":"additional","affiliation":[{"name":"Shenzhen Research Institute of Big Data,Shenzhen,China"}]},{"given":"Chaoren","family":"Wang","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Songting","family":"Liu","sequence":"additional","affiliation":[{"name":"Shenzhen Research Institute of Big Data,Shenzhen,China"}]},{"given":"Xi","family":"Chen","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Junan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory,Shanghai,China"}]},{"given":"Zihao","family":"Fang","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Haopeng","family":"Chen","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Tze Ying","family":"Tang","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Lexiao","family":"Zou","sequence":"additional","affiliation":[{"name":"Shenzhen Research Institute of Big Data,Shenzhen,China"}]},{"given":"Mingxuan","family":"Wang","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Jun","family":"Han","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Kai","family":"Chen","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory,Shanghai,China"}]},{"given":"Haizhou","family":"Li","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]},{"given":"Zhizheng","family":"Wu","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong,Shenzhen,China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"FastSpeech 2: Fast and high-quality end-to-end text to speech","author":"Ren","year":"2020","journal-title":"ICLR"},{"key":"ref2","article-title":"Naturalspeech 2: Latent diffusion models are natural and zero-shot speech and singing synthesizers","author":"Shen","journal-title":"ICLR. 2024"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.cag.2024.104058"},{"key":"ref4","article-title":"Conditional variational autoencoder with adversarial learning for end-to-end text-to-speech","author":"Kim","year":"2021","journal-title":"ICML"},{"key":"ref5","article-title":"Neural codec language models are zero-shot text to speech synthesizers","author":"Wang","year":"2023","journal-title":"arXiv preprint arXiv:2301.02111"},{"key":"ref6","first-page":"21450","article-title":"Audioldm: Text-to-audio generation with latent diffusion models","volume":"202","author":"Liu","year":"2023","journal-title":"ICML"},{"key":"ref7","article-title":"AUDIT: Audio editing by following instructions with latent diffusion models","author":"Wang","year":"2023","journal-title":"NIPS"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/SLT61566.2024.10832319"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ISCSLP63861.2024.10800419"},{"key":"ref10","article-title":"PicoAudio: Enabling Precise Timestamp and Frequency Controllability of Audio Events in Text-to-audio Generation","author":"Xie","year":"2024","journal-title":"arXiv preprint arxiv:2407.02869"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1609.03499"},{"key":"ref12","first-page":"2415","article-title":"Efficient neural audio synthesis","volume":"80","author":"Kalchbrenner","year":"2018","journal-title":"ICML"},{"key":"ref13","article-title":"DiffWave: A versatile diffusion model for audio synthesis","author":"Kong","year":"2021","journal-title":"ICLR"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683143"},{"key":"ref15","article-title":"MelGAN: Generative adversarial networks for conditional waveform synthesis","author":"Kumar","year":"2019","journal-title":"NIPS"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2143"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095298"},{"key":"ref18","article-title":"Bigvgan: A universal neural vocoder with large-scale training","author":"Lee","year":"2023","journal-title":"ICLR"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3277276"},{"key":"ref20","article-title":"Universal MelGAN: A robust neural vocoder for high-fidelity waveform generation in multiple domains","volume":"abs\/2011.09631","author":"Jang","year":"2020","journal-title":"arXiv"},{"key":"ref21","article-title":"High fidelity neural audio compression","volume":"abs\/2210.13438","author":"D\u00e9fossez","year":"2022","journal-title":"arXiv"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448436"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3468005"},{"key":"ref24","article-title":"Naturalspeech 3: Zeroshot speech synthesis with factorized codec and diffusion models","author":"Ju","year":"2024","journal-title":"ICML"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1599"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2826"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052942"},{"key":"ref28","first-page":"119","article-title":"AudioCaps: Generating captions for audios in the wild","author":"Kim","year":"2019","journal-title":"NAACL-HLT"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3268730"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.3030497"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-48"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475437"},{"key":"ref35","article-title":"M4singer: A multi-style, multi-singer and musical score provided mandarin singing corpus","author":"Zhang","year":"2022","journal-title":"NIPS"},{"key":"ref36","article-title":"Emilia: An Extensive, Multilingual, and Diverse Speech Dataset for Large-Scale Speech Generation","author":"Haorui","year":"2024","journal-title":"SLT"}],"event":{"name":"2024 IEEE Spoken Language Technology Workshop (SLT)","location":"Macao","start":{"date-parts":[[2024,12,2]]},"end":{"date-parts":[[2024,12,5]]}},"container-title":["2024 IEEE Spoken Language Technology Workshop (SLT)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10830790\/10830793\/10832255.pdf?arnumber=10832255","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,20]],"date-time":"2025-02-20T19:42:44Z","timestamp":1740080564000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10832255\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,2]]},"references-count":36,"URL":"https:\/\/doi.org\/10.1109\/slt61566.2024.10832255","relation":{},"subject":[],"published":{"date-parts":[[2024,12,2]]}}}