{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T06:25:03Z","timestamp":1774419903273,"version":"3.50.1"},"reference-count":34,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100006190","name":"Research and Development","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100006190","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100000266","name":"Engineering and Physical Sciences Research Council","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100000266","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,4,6]]},"DOI":"10.1109\/icassp49660.2025.10890129","type":"proceedings-article","created":{"date-parts":[[2025,3,12]],"date-time":"2025-03-12T17:15:19Z","timestamp":1741799719000},"page":"1-5","source":"Crossref","is-referenced-by-count":3,"title":["FlowSep: Language-Queried Sound Separation with Rectified Flow Matching"],"prefix":"10.1109","author":[{"given":"Yi","family":"Yuan","sequence":"first","affiliation":[{"name":"University of Surrey,Centre for Vision, Speech and Signal Processing (CVSSP),Guildford,UK"}]},{"given":"Xubo","family":"Liu","sequence":"additional","affiliation":[{"name":"University of Surrey,Centre for Vision, Speech and Signal Processing (CVSSP),Guildford,UK"}]},{"given":"Haohe","family":"Liu","sequence":"additional","affiliation":[{"name":"University of Surrey,Centre for Vision, Speech and Signal Processing (CVSSP),Guildford,UK"}]},{"given":"Mark D.","family":"Plumbley","sequence":"additional","affiliation":[{"name":"University of Surrey,Centre for Vision, Speech and Signal Processing (CVSSP),Guildford,UK"}]},{"given":"Wenwu","family":"Wang","sequence":"additional","affiliation":[{"name":"University of Surrey,Centre for Vision, Speech and Signal Processing (CVSSP),Guildford,UK"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2915167"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2018.2842159"},{"key":"ref3","first-page":"342","article-title":"Decoupling magnitude and phase estimation with deep resunet for music source separation","volume-title":"International Society for Music Information Retrieval Conference","author":"Kong"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2555"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2210"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447061"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-10894"},{"key":"ref8","first-page":"71 340","article-title":"Audit: Audio editing by following instructions with latent diffusion models","volume":"36","author":"Wang","year":"2023","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref9","article-title":"WavCraft: Audio Editing and Generation with Large Language Models","volume-title":"ICLR 2024 Workshop on Large Language Model (LLM) Agents","author":"Liang"},{"key":"ref10","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International Conference on Machine Learning","author":"Radford"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.compeleceng.2020.106606"},{"key":"ref12","article-title":"BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding","author":"Devlin","year":"2018"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2022-10894"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095969"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-676"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447219"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446705"},{"key":"ref18","article-title":"Flow straight and fast: Learning to generate and transfer data with rectified flow","volume-title":"The Eleventh International Conference on Learning Representations","author":"Liu"},{"issue":"1","key":"ref19","first-page":"5485","article-title":"Exploring the limits of transfer learning with a unified text-to-text transformer","volume":"21","author":"Raffel","year":"2020","journal-title":"Journal of Machine Learning Research"},{"key":"ref20","article-title":"AudioCaps: Generating captions for audios in the wild","volume-title":"Annual Conference of the North American Chapter of the Association for Computational Linguistics","author":"Kim"},{"key":"ref21","article-title":"Very Deep Convolutional Networks for Large-Scale Image Recognition","author":"Simonyan","year":"2014"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3419446"},{"key":"ref23","article-title":"BigVGAN: A universal neural vocoder with large-scale training","volume-title":"International Conference on Learning Representations","author":"Lee"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447898"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681688"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref27","article-title":"AudioLDM: Text-to-Audio generation with latent diffusion models","volume-title":"International Conference on Machine Learning","author":"Liu"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2024.3399607"},{"key":"ref29","article-title":"Audiobox: Unified audio generation with natural language prompts","author":"Vyas","year":"2023"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/2733373.2806390"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952261"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-2219"},{"key":"ref33","article-title":"A reference-free metric for language-queried audio source separation using contrastive language-audio pretraining","volume-title":"Workshop on Detection and Classification of Acoustic Scenes and Events","author":"Xiao"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.3030497"}],"event":{"name":"ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Hyderabad, India","start":{"date-parts":[[2025,4,6]]},"end":{"date-parts":[[2025,4,11]]}},"container-title":["ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10887540\/10887541\/10890129.pdf?arnumber=10890129","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T05:25:19Z","timestamp":1774416319000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10890129\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,6]]},"references-count":34,"URL":"https:\/\/doi.org\/10.1109\/icassp49660.2025.10890129","relation":{},"subject":[],"published":{"date-parts":[[2025,4,6]]}}}