{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:27:50Z","timestamp":1775230070605,"version":"3.50.1"},"reference-count":93,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"British Broadcasting Corporation Research and Development"},{"DOI":"10.13039\/501100000266","name":"Engineering and Physical Sciences Research Council","doi-asserted-by":"publisher","award":["EP\/T019751\/1"],"award-info":[{"award-number":["EP\/T019751\/1"]}],"id":[{"id":"10.13039\/501100000266","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Centre for Vision, Speech and Signal Processing"},{"name":"Faculty of Engineering and Physical Science"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. Process."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/taslp.2024.3399607","type":"journal-article","created":{"date-parts":[[2024,5,13]],"date-time":"2024-05-13T17:36:54Z","timestamp":1715621814000},"page":"2871-2883","source":"Crossref","is-referenced-by-count":124,"title":["AudioLDM 2: Learning Holistic Audio Generation With Self-Supervised Pretraining"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-1036-7888","authenticated-orcid":false,"given":"Haohe","family":"Liu","sequence":"first","affiliation":[{"name":"Centre for Vision, Speech and Signal Processing (CVSSP), University of Surrey, Guilford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6887-0956","authenticated-orcid":false,"given":"Yi","family":"Yuan","sequence":"additional","affiliation":[{"name":"Centre for Vision, Speech and Signal Processing (CVSSP), University of Surrey, Guilford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-9950-2672","authenticated-orcid":false,"given":"Xubo","family":"Liu","sequence":"additional","affiliation":[{"name":"Centre for Vision, Speech and Signal Processing (CVSSP), University of Surrey, Guilford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6079-5130","authenticated-orcid":false,"given":"Xinhao","family":"Mei","sequence":"additional","affiliation":[{"name":"Centre for Vision, Speech and Signal Processing (CVSSP), University of Surrey, Guilford, U.K."}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2864-0475","authenticated-orcid":false,"given":"Qiuqiang","family":"Kong","sequence":"additional","affiliation":[{"name":"Department of Electronic Engineering, Chinese University of Hong Kong, Hong Kong, SAR, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4078-1273","authenticated-orcid":false,"given":"Qiao","family":"Tian","sequence":"additional","affiliation":[{"name":"Speech, Audio &amp; Music Intelligence (SAMI) Group, ByteDance Inc., Beijing, China"}]},{"given":"Yuping","family":"Wang","sequence":"additional","affiliation":[{"name":"Speech, 
Audio &amp; Music Intelligence (SAMI) Group, ByteDance Inc., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8393-5703","authenticated-orcid":false,"given":"Wenwu","family":"Wang","sequence":"additional","affiliation":[{"name":"Centre for Vision, Speech and Signal Processing (CVSSP), University of Surrey, Guilford, U.K."}]},{"given":"Yuxuan","family":"Wang","sequence":"additional","affiliation":[{"name":"Speech, Audio &amp; Music Intelligence (SAMI) Group, ByteDance Inc., Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9708-1075","authenticated-orcid":false,"given":"Mark D.","family":"Plumbley","sequence":"additional","affiliation":[{"name":"Centre for Vision, Speech and Signal Processing (CVSSP), University of Surrey, Guilford, U.K."}]}],"member":"263","reference":[{"key":"ref1","article-title":"A comprehensive survey of AI-generated content: A history of generative AI from GAN to ChatGPT","author":"Cao","year":"2023"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3356232"},{"key":"ref3","article-title":"AudioGen: Textually guided audio generation","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kreuk","year":"2022"},{"key":"ref4","first-page":"21450","article-title":"AudioLDM: Text-to-audio generation with latent diffusion models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Liu","year":"2023"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.1055"},{"key":"ref6","article-title":"Riffusion: Stable diffusion for real-time music generation, 2022","author":"Forsgren","year":"2022"},{"key":"ref7","article-title":"WavJourney: Compositional audio creation with large language models","author":"Liu","year":"2023"},{"key":"ref8","article-title":"MusicLM: Generating music from text","author":"Agostinelli","year":"2023"},{"key":"ref9","first-page":"51","article-title":"Expressive sonification of footstep sounds","volume-title":"Proc. Interactive Sonification Workshop","author":"Bresin","year":"2010"},{"key":"ref10","article-title":"DDSP: Differentiable digital signal processing","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Engel","year":"2020"},{"key":"ref11","article-title":"FastSpeech 2: Fast and high-quality end-to-end text to speech","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Ren","year":"2021"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TENCON.2016.7848007"},{"key":"ref13","first-page":"17450","article-title":"Efficient neural music generation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Lam","year":"2023"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3268730"},{"key":"ref15","first-page":"13916","article-title":"Make-an-Audio: Text-to-audio generation with prompt-enhanced diffusion models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Huang","year":"2023"},{"key":"ref16","article-title":"VoiceFixer: Toward general speech restoration with neural vocoder","author":"Liu","year":"2021"},{"key":"ref17","first-page":"1298","article-title":"Data2Vec: A general framework for self-supervised learning in speech, vision and language","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Baevski","year":"2022"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01457"},{"key":"ref19","article-title":"Universal source separation with weakly labelled data","author":"Kong","year":"2023"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1561\/116.00000049"},{"key":"ref21","first-page":"28708","article-title":"Masked autoencoders that listen","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Xu","year":"2022"},{"key":"ref22","article-title":"Language models are unsupervised multitask learners","author":"Radford","year":"2019"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref24","article-title":"A survey of large language models","author":"Zhao","year":"2023"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3129994"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3288409"},{"key":"ref27","first-page":"119","article-title":"AudioCaps: Generating captions for audios in the wild","volume-title":"Proc. 2019 Conf. North Amer. Chapter Assoc. Comput. Linguistics: Hum. Lang. Technol.","author":"Kim","year":"2019"},{"key":"ref28","article-title":"The LJSpeech dataset","author":"Ito","year":"2017"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096023"},{"key":"ref30","article-title":"Taming visually guided sound generation","volume-title":"Proc. Brit. Mach. Vis. Conf.","author":"Iashin","year":"2021"},{"key":"ref31","first-page":"8599","article-title":"Grad-TTS: A diffusion probabilistic model for text-to-speech","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Popov","year":"2021"},{"key":"ref32","first-page":"8067","article-title":"Glow-TTS: A generative flow for text-to-speech via monotonic alignment search","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Kim","year":"2020"},{"key":"ref33","article-title":"Noise2Music: Text-conditioned music generation with diffusion models","author":"Huang","year":"2023"},{"key":"ref34","first-page":"47704","article-title":"Simple and controllable music generation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"36","author":"Copet","year":"2023"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688253"},{"key":"ref36","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Ho","year":"2020"},{"key":"ref37","article-title":"Score-based generative modeling through stochastic differential equations","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Song","year":"2021"},{"key":"ref38","first-page":"8780","article-title":"Diffusion models beat GANs on image synthesis","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Dhariwal","year":"2021"},{"key":"ref39","article-title":"Hierarchical text-conditional image generation with CLIP latents","author":"Ramesh","year":"2022"},{"key":"ref40","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Saharia","year":"2022"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3204461"},{"key":"ref42","article-title":"WaveGrad: Estimating gradients for waveform generation","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Chen","year":"2021"},{"key":"ref43","article-title":"DiffWave: A versatile diffusion model for audio synthesis","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kong","year":"2021"},{"key":"ref44","first-page":"23689","article-title":"BinauralGrad: A two-stage conditional diffusion probabilistic model for binaural audio synthesis","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Leng","year":"2022"},{"key":"ref45","article-title":"Make-a-video: Text-to-video generation without text-video data","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Singer","year":"2022"},{"key":"ref46","article-title":"Imagen video: High definition video generation with diffusion models","author":"Ho","year":"2022"},{"key":"ref47","article-title":"ResGrad: Residual denoising diffusion probabilistic models for text to speech","author":"Chen","year":"2022"},{"key":"ref48","article-title":"Bilateral denoising diffusion models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lam","year":"2022"},{"key":"ref49","article-title":"PriorGrad: Improving conditional denoising diffusion models with data-driven adaptive prior","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lee","year":"2022"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746690"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612348"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096211"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i12.29294"},{"key":"ref54","first-page":"125","article-title":"WaveNet: A generative model for raw audio","volume-title":"Proc. ISCA Speech Synth. Workshop","author":"Oord","year":"2016"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i20.30271"},{"key":"ref56","first-page":"12449","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Baevski","year":"2020"},{"key":"ref57","article-title":"Auto-encoding variational bayes","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kingma","year":"2014"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/icassp.2017.7952261"},{"key":"ref60","article-title":"MERT: Acoustic music understanding model with large-scale self-supervised training","author":"Li","year":"2023"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1873"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN52387.2021.9534474"},{"key":"ref64","first-page":"17022","article-title":"HiFi-GAN: Generative adversarial networks for efficient and high fidelity speech synthesis","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Kong","year":"2020"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1145\/2733373.2806390"},{"key":"ref66","first-page":"2579","article-title":"Visualizing data using t-SNE","volume":"9","author":"Maaten","year":"2008","journal-title":"J. Mach. Learn. 
Res."},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/ICEIEC49280.2020.9152352"},{"key":"ref68","article-title":"Learning to answer by learning to ask: Getting the best of GPT-2 and BERT worlds","author":"Klein","year":"2019"},{"key":"ref69","article-title":"Professor forcing: A new algorithm for training recurrent networks","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Lamb","year":"2016"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1007\/s10462-012-9338-y"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095969"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10097117"},{"key":"ref73","first-page":"1","article-title":"Scaling instruction-finetuned language models","volume-title":"J. Mach. Learn. Res.","volume":"25","author":"Chung","year":"2024"},{"issue":"1","key":"ref74","first-page":"5485","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1007\/978-981-99-0827-1"},{"key":"ref76","article-title":"Auto-encoding variational Bayes","author":"Kingma","year":"2013"},{"key":"ref77","article-title":"Attention is all you need","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst.","author":"Vaswani","year":"2017"},{"key":"ref78","article-title":"Classifier-free diffusion guidance","volume-title":"Proc. NeurIPS Workshop Deep Generative Models Downstream Appl.","author":"Ho","year":"2021"},{"key":"ref79","first-page":"16784","article-title":"GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Nichol","year":"2022"},{"key":"ref80","article-title":"WavCaps: A ChatGPT-assisted weakly-labelled audio captioning dataset for audio-language multimodal research","author":"Mei","year":"2023"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053174"},{"key":"ref82","first-page":"316","article-title":"FMA: A dataset for music analysis","volume-title":"Proc. Int. Soc. Music Inf. Retrieval Conf.","author":"Defferrard","year":"2017"},{"key":"ref83","first-page":"591","article-title":"The million song dataset","volume-title":"Proc. Int. Soc. Music Inf. Retrieval Conf.","author":"Bertin-Mahieux","year":"2011"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1965"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10094670"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952132"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-227"},{"key":"ref88","first-page":"5","article-title":"A technique for the measurement of attitudes","volume":"140","author":"Likert","year":"1932","journal-title":"Arch. Psychol."},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096876"},{"key":"ref90","article-title":"Denoising diffusion implicit models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Song","year":"2020"},{"key":"ref91","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Loshchilov","year":"2019"},{"key":"ref92","article-title":"Make-an-Audio 2: Temporal-enhanced text-to-audio generation","author":"Huang","year":"2023"},{"key":"ref93","article-title":"Mousai: Text-to-music generation with long-context latent diffusion","author":"Schneider","year":"2023"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6570655\/10304349\/10530074.pdf?arnumber=10530074","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,6,1]],"date-time":"2024-06-01T04:49:19Z","timestamp":1717217359000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10530074\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":93,"URL":"https:\/\/doi.org\/10.1109\/taslp.2024.3399607","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"value":"2329-9290","type":"print"},{"value":"2329-9304","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}