{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T20:15:50Z","timestamp":1776888950712,"version":"3.51.2"},"reference-count":33,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T00:00:00Z","timestamp":1760227200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,12]],"date-time":"2025-10-12T00:00:00Z","timestamp":1760227200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,12]]},"DOI":"10.1109\/waspaa66052.2025.11230940","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:47Z","timestamp":1763146007000},"page":"1-5","source":"Crossref","is-referenced-by-count":1,"title":["Generating Separated Singing Vocals Using a Diffusion Model Conditioned on Music Mixtures"],"prefix":"10.1109","author":[{"given":"Gen\u00eds","family":"Plaja-Roglans","sequence":"first","affiliation":[{"name":"Music.AI,Salt Lake City,USA"}]},{"given":"Yun-Ning","family":"Hung","sequence":"additional","affiliation":[{"name":"Music.AI,Salt Lake City,USA"}]},{"given":"Xavier","family":"Serra","sequence":"additional","affiliation":[{"name":"Universitat Pompeu Fabra,Music Technology Group,Spain"}]},{"given":"Igor","family":"Pereira","sequence":"additional","affiliation":[{"name":"Music.AI,Salt Lake City,USA"}]}],"member":"263","reference":[{"key":"ref1","first-page":"745","article-title":"Singing voice separation with deep U-Net convolutional networks","volume-title":"18th Int. Society for Music Information Retrieval Conf. (ISMIR)","author":"Jansson"},{"key":"ref2","article-title":"Carnatic singing voice separation using cold diffusion on training data with bleeding","volume-title":"24th Int. Society for Music Information Retrieval Conf. (ISMIR)","author":"Plaja-Roglans"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.5334\/tismir.171"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21105\/joss.02154"},{"key":"ref5","article-title":"D3Net: densely connected multidilated DenseNet for music source separation","author":"Takahashi","year":"2020"},{"key":"ref6","article-title":"Music separation enhancement with generative modeling","volume-title":"23rd Int. Society for Music Information Retrieval Conf. (ISMIR)","author":"Schaffer"},{"key":"ref7","article-title":"Separate and diffuse: Using a pretrained diffusion model for better source separation","volume-title":"12th Int. Conf. on Learning Representations","author":"Lutati"},{"key":"ref8","article-title":"Learned complex masks for multi-instrument source separation","author":"Jansson","year":"2021"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2022.3219355"},{"key":"ref10","article-title":"Diffusion Models Beat GANs on Image Synthesis","volume-title":"35th Conf. on Neural Information Processing Systems (NeurIPS 2021)","author":"Dhariwal"},{"key":"ref11","article-title":"Diffwave: A versatile diffusion model for audio synthesis","volume-title":"9th Int. Conf. on Learning Representations (ICLR)","author":"Kong"},{"key":"ref12","article-title":"Fast timing-conditioned latent audio diffusion","volume-title":"Int. Conf. on Machine Learning (ICML)","author":"Evans"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095637"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053024"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3115"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i8.26131"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10889006"},{"key":"ref18","article-title":"Multi-source diffusion models for simultaneous music generation and separation","volume-title":"The 12th Int. Conf. on Learning Representations","author":"Mariani"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10889421"},{"key":"ref20","article-title":"Universal speech enhancement with score-based diffusion","author":"Serr\u00e0","year":"2022"},{"key":"ref21","article-title":"Progressive distillation for fast sampling of diffusion models","volume-title":"Int. Conf. on Learning Representations (ICLR)","author":"Salimans"},{"key":"ref22","article-title":"Mo\u00fbsai: Text-to-music generation with long-context latent diffusion","author":"Schneider","year":"2023"},{"key":"ref23","article-title":"Denoising diffusion implicit models","volume-title":"Int. Conf. on Learning Representations (ICLR)","author":"Song"},{"key":"ref24","first-page":"334","article-title":"Wave-U-Net: A multi-scale neural network for end-to-end audio source separation","volume-title":"19th Int. Society for Music Information Retrieval Conf. (ISMIR)","author":"Stoller"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11366"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.11671"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2023.127063"},{"key":"ref29","doi-asserted-by":"crossref","DOI":"10.1109\/ICASSP48485.2024.10446843","article-title":"Music source separation with band-split rope transformer","volume-title":"SDX Workshop","author":"Lu"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/icassp49357.2023.10096956"},{"key":"ref31","article-title":"FlashAttention: Fast and memory-efficient exact attention with IO-awareness","author":"Dao","year":"2022","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref32","article-title":"MUSDB18 - a corpus for music separation","author":"Rafii","year":"2017"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-93764-9_28"}],"event":{"name":"2025 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)","location":"Tahoe City, CA, USA","start":{"date-parts":[[2025,10,12]]},"end":{"date-parts":[[2025,10,15]]}},"container-title":["2025 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11230875\/11230917\/11230940.pdf?arnumber=11230940","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:23:00Z","timestamp":1763191380000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11230940\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,12]]},"references-count":33,"URL":"https:\/\/doi.org\/10.1109\/waspaa66052.2025.11230940","relation":{},"subject":[],"published":{"date-parts":[[2025,10,12]]}}}