{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:27:14Z","timestamp":1775230034813,"version":"3.50.1"},"reference-count":61,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62271083"],"award-info":[{"award-number":["62271083"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key Project of the National Language Commission","award":["ZDI145-81"],"award-info":[{"award-number":["ZDI145-81"]}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["2023RC73"],"award-info":[{"award-number":["2023RC73"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["2023RC13"],"award-info":[{"award-number":["2023RC13"]}],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. 
Process."],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/taslp.2024.3485485","type":"journal-article","created":{"date-parts":[[2024,10,23]],"date-time":"2024-10-23T17:50:29Z","timestamp":1729705829000},"page":"4700-4712","source":"Crossref","is-referenced-by-count":26,"title":["Auffusion: Leveraging the Power of Diffusion and Large Language Models for Text-to-Audio Generation"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-0442-0932","authenticated-orcid":false,"given":"Jinlong","family":"Xue","sequence":"first","affiliation":[{"name":"School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-7642-4942","authenticated-orcid":false,"given":"Yayue","family":"Deng","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5881-3723","authenticated-orcid":false,"given":"Yingming","family":"Gao","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6284-5039","authenticated-orcid":false,"given":"Ya","family":"Li","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, Beijing University of Posts and Telecommunications, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-110"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/549"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-emnlp.3"},{"key":"ref4","article-title":"Towards data distillation for end-to-end spoken conversational question answering","author":"You","year":"2020"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414999"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-naacl.91"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-emnlp.3"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/MLSP52302.2021.9596430"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683727"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3268730"},{"key":"ref11","first-page":"21450","article-title":"AudioLDM: Text-to-audio generation with latent diffusion models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Liu","year":"2023"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3399607"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3612348"},{"key":"ref14","first-page":"13916","article-title":"Make-an-audio: Text-to-audio generation with prompt-enhanced diffusion models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Huang","year":"2023"},{"key":"ref15","article-title":"Make-an-audio 2: Temporal-enhanced text-to-audio generation","author":"Huang","year":"2023"},{"key":"ref16","first-page":"2256","article-title":"Deep unsupervised learning using nonequilibrium thermodynamics","volume-title":"Proc. 32nd Int. Conf. Mach. Learn.","author":"Sohl-Dickstein","year":"2015"},{"key":"ref17","first-page":"6840","article-title":"Denoising diffusion probabilistic models","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","author":"Ho","year":"2020"},{"key":"ref18","first-page":"140:1","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"J. Mach. Learn. Res."},{"key":"ref19","article-title":"Roberta: A robustly optimized BERT pretraining approach","author":"Liu","year":"2019"},{"key":"ref20","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095889"},{"key":"ref22","article-title":"Scaling instruction-finetuned language models","author":"Chung","year":"2022"},{"key":"ref23","article-title":"Audiogen: Textually guided audio generation","volume-title":"Proc. 11th Int. Conf. Learn. Representations","author":"Kreuk","year":"2023"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01042"},{"key":"ref25","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume-title":"Proc. Adv. Neural Inf. Process. Syst. 35: Annu. Conf. Neural Inf. Process. Syst.","author":"Saharia","year":"2022"},{"key":"ref26","article-title":"Pixart-alpha: Fast training of diffusion transformer for photorealistic text-to-image synthesis","author":"Chen","year":"2023"},{"key":"ref27","article-title":"Riffusion - stable diffusion for real-time music generation","author":"Forsgren","year":"2022"},{"key":"ref28","article-title":"eDiff-I: Text-to-image diffusion models with an ensemble of expert denoisers","author":"Balaji","year":"2022"},{"key":"ref29","first-page":"78723","article-title":"T2I-compbench: A comprehensive benchmark for open-world compositional text-to-image generation","volume-title":"Proc. Adv. Neural Inf. Process. Syst. 36: Annu. Conf. Neural Inf. Process. Syst.","author":"Huang","year":"2023"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1810.04805"},{"key":"ref31","first-page":"8821","article-title":"Zero-shot text-to-image generation","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","author":"Ramesh","year":"2021"},{"key":"ref32","first-page":"6306","article-title":"Neural discrete representation learning","volume-title":"Adv. Neural Inf. Process. Syst. 30: Annu. Conf. Neural Inf. Process. Syst.","author":"Oord","year":"2017"},{"key":"ref33","article-title":"Hierarchical text-conditional image generation with CLIP latents","author":"Ramesh","year":"2022"},{"key":"ref34","first-page":"16784","article-title":"GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models","volume-title":"Int. Conf. Mach. Learn.","author":"Nichol","year":"2022"},{"key":"ref35","article-title":"High fidelity neural audio compression","volume":"2023","author":"Dfossez","year":"2023","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref36","first-page":"28708","article-title":"Masked autoencoders that listen","volume-title":"Proc. Adv. Neural Inf. Process. Syst. 35: Annu. Conf. Neural Inf. Process. Syst. 2022","author":"Huang","year":"2022"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref38","article-title":"Prompt-to-prompt image editing with cross-attention control","volume-title":"Proc. 11th Int. Conf. Learn. 
Representations","author":"Hertz","year":"2023"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00585"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.310"},{"key":"ref41","article-title":"Classifier-free diffusion guidance","author":"Ho","year":"2022"},{"key":"ref42","article-title":"Sdedit: Guided image synthesis and editing with stochastic differential equations","volume-title":"Proc. 10th Int. Conf. Learn. Representations","author":"Meng","year":"2022"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1145\/3528233.3530757"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00323"},{"key":"ref46","first-page":"119","article-title":"Audiocaps: Generating captions for audios in the wild","volume-title":"Proc. Conf. North Amer. Chapter Assoc. Comput. Linguistics: Hum. Lang. Technol. 2019","author":"Kim","year":"2019"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3419446"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.23919\/EUSIPCO54536.2021.9616087"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052990"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1145\/2733373.2806390"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1145\/2647868.2655045"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/TSA.2002.800560"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414579"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952261"},{"key":"ref55","first-page":"17022","article-title":"Hifi-GAN: Generative adversarial networks for efficient and high fidelity speech synthesis","volume-title":"Proc. Adv. Neural Inf. Process. Syst. 33: Annu. Conf. Neural Inf. Process. Syst.","author":"Kong","year":"2020"},{"key":"ref56","article-title":"BigVGAN: A universal neural vocoder with large-scale training","volume-title":"Proc. 11th Int. Conf. Learn. Representations","author":"Lee","year":"2023"},{"key":"ref57","article-title":"Decoupled weight decay regularization","volume-title":"Proc. 7th Int. Conf. Learn. Representations","author":"Loshchilov","year":"2019"},{"key":"ref58","first-page":"6626","article-title":"GANs trained by a two time-scale update rule converge to a local nash equilibrium","volume-title":"Proc. Adv. Neural Inf. Process. Syst. 30: Annu. Conf. Neural Inf. Process. 
Syst.","author":"Heusel","year":"2017"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.3030497"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952132"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10097117"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6570655\/10304349\/10731578.pdf?arnumber=10731578","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T01:28:37Z","timestamp":1732670917000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10731578\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":61,"URL":"https:\/\/doi.org\/10.1109\/taslp.2024.3485485","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"value":"2329-9290","type":"print"},{"value":"2329-9304","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]}}}