{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T20:34:16Z","timestamp":1776890056589,"version":"3.51.2"},"reference-count":40,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T00:00:00Z","timestamp":1760227200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T00:00:00Z","timestamp":1760227200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,12]]},"DOI":"10.1109\/waspaa66052.2025.11230977","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:47Z","timestamp":1763146007000},"page":"1-5","source":"Crossref","is-referenced-by-count":2,"title":["Learning to Upsample and Upmix Audio in the Latent Domain"],"prefix":"10.1109","author":[{"given":"Dimitrios","family":"Bralios","sequence":"first","affiliation":[{"name":"University of Illinois Urbana-Champaign,Urbana,IL,USA"}]},{"given":"Paris","family":"Smaragdis","sequence":"additional","affiliation":[{"name":"University of Illinois Urbana-Champaign,Urbana,IL,USA"}]},{"given":"Jonah","family":"Casebeer","sequence":"additional","affiliation":[{"name":"Adobe Research,San Francisco,CA,USA"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3129994"},{"key":"ref2","article-title":"High fidelity neural audio compression","author":"D\u00e9fossez","year":"2023","journal-title":"Transactions on Machine Learning Research"},{"key":"ref3","article-title":"High-fidelity audio compression with improved rvqgan","volume-title":"Proc. NeurIPS","volume":"36","author":"Kumar"},{"key":"ref4","article-title":"Rave: A variational autoencoder for fast and high-quality neural audio synthesis","author":"Caillon","year":"2021"},{"key":"ref5","first-page":"21450","article-title":"Audioldm: Text-to-audio generation with latent diffusion models","volume-title":"Proc. ICML","author":"Liu"},{"key":"ref6","first-page":"13916","article-title":"Make-an-audio: Text-to-audio generation with prompt-enhanced diffusion models","volume-title":"Proc. ICML","author":"Huang"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10888461"},{"key":"ref8","first-page":"12652","article-title":"Fast timing-conditioned latent audio diffusion","volume-title":"Proc. ICML","author":"Evans"},{"key":"ref9","article-title":"A review on score-based generative models for audio applications","author":"Zhu","year":"2025"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3399607"},{"key":"ref11","article-title":"Audiogen: Textually guided audio generation","volume-title":"Proc. ICLR","author":"Kreuk"},{"key":"ref12","article-title":"Simple and controllable music generation","volume-title":"Proc. NeurIPS","author":"Copet"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2023.3288409"},{"key":"ref14","article-title":"Musiclm: Generating music from text","author":"Agostinelli","year":"2023"},{"key":"ref15","article-title":"Uniaudio: Towards universal audio generation with large language models","volume-title":"Proc. ICML","author":"Yang"},{"key":"ref16","article-title":"Soundstorm: Efficient parallel audio generation","author":"Borsos","year":"2023"},{"key":"ref17","article-title":"Masked audio generation using a single non-autoregressive transformer","volume-title":"Proc. ICLR","author":"Ziv"},{"key":"ref18","article-title":"Vampnet: Music generation via masked acoustic token modeling","volume-title":"Proc. ISMIR","author":"Flores Garcia"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095382"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2024.3432393"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2022.3190726"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3507566"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10890825"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2023.3333205"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447246"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10889180"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-2069"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-590"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-337"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10890379"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/APSIPAASC63619.2025.10848753"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10890252"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414605"},{"key":"ref34","article-title":"Melgan: Generative adversarial networks for conditional waveform synthesis","volume-title":"Proc. NeurIPS","volume":"32","author":"Kumar"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746978"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.304"},{"key":"ref37","article-title":"Vocos: Closing the gap between time-domain and fourierbased neural vocoders for high-quality audio synthesis","volume-title":"Proc. ICLR","author":"Siuzdak"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01548"},{"key":"ref39","article-title":"Fma: A dataset for music analysis","volume-title":"Proc. ISMIR","author":"Defferrard"},{"key":"ref40","article-title":"auraloss: Audio focused loss functions in pytorch","volume-title":"Digital music research network one-day workshop (DMRN+ 15)","author":"Steinmetz"}],"event":{"name":"2025 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)","location":"Tahoe City, CA, USA","start":{"date-parts":[[2025,10,12]]},"end":{"date-parts":[[2025,10,15]]}},"container-title":["2025 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11230875\/11230917\/11230977.pdf?arnumber=11230977","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:32:47Z","timestamp":1763191967000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11230977\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,12]]},"references-count":40,"URL":"https:\/\/doi.org\/10.1109\/waspaa66052.2025.11230977","relation":{},"subject":[],"published":{"date-parts":[[2025,10,12]]}}}