{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T10:28:13Z","timestamp":1763202493839,"version":"3.28.0"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,6,6]]},"DOI":"10.1109\/icassp39728.2021.9413723","type":"proceedings-article","created":{"date-parts":[[2021,5,13]],"date-time":"2021-05-13T19:53:45Z","timestamp":1620935625000},"page":"31-35","source":"Crossref","is-referenced-by-count":17,"title":["Semi-Supervised Singing Voice Separation With Noisy Self-Training"],"prefix":"10.1109","author":[{"given":"Zhepei","family":"Wang","sequence":"first","affiliation":[]},{"given":"Ritwik","family":"Giri","sequence":"additional","affiliation":[]},{"given":"Umut","family":"Isik","sequence":"additional","affiliation":[]},{"given":"Jean-Marc","family":"Valin","sequence":"additional","affiliation":[]},{"given":"Arvindh","family":"Krishnaswamy","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Music source separation in the waveform domain","volume":"abs 1911 13254","author":"d\u00e9fossez","year":"2019","journal-title":"ArXiv"},{"key":"ref11","article-title":"Voice separation with an unknown number of multiple speakers","volume":"abs 2003 1531","author":"nachmani","year":"2020","journal-title":"ArXiv"},{"doi-asserted-by":"publisher","key":"ref12","DOI":"10.1109\/ICASSP40776.2020.9053513"},{"key":"ref13","doi-asserted-by":"crossref","first-page":"310","DOI":"10.1109\/TASL.2009.2026503","article-title":"On the improvement of singing voice separation for monaural recordings using the mir-1k dataset","volume":"18","author":"hsu","year":"2010","journal-title":"IEEE Transactions on Audio Speech and Language Processing"},{"doi-asserted-by":"publisher","key":"ref14","DOI":"10.1109\/TSP.2014.2332434"},{"year":"2017","author":"rafii","article-title":"The MUSDB18 corpus for music separation","key":"ref15"},{"doi-asserted-by":"publisher","key":"ref16","DOI":"10.1109\/ICASSP.2017.7952158"},{"doi-asserted-by":"publisher","key":"ref17","DOI":"10.23919\/EUSIPCO.2019.8902810"},{"doi-asserted-by":"publisher","key":"ref18","DOI":"10.1109\/ICASSP.2018.8461722"},{"doi-asserted-by":"publisher","key":"ref19","DOI":"10.1109\/ICASSP.2019.8682443"},{"key":"ref28","first-page":"293","author":"st\u00f6ter","year":"2018","journal-title":"The 2018 Signal Separation Evaluation Campaign"},{"key":"ref4","article-title":"Wave-u-net: A multi-scale neural network for end-to-end audio source separation","author":"stoller","year":"2018","journal-title":"ISMIR"},{"year":"2019","author":"smule","article-title":"DAMP-VSEP: Smule Digital Archive of Mobile Performances - Vocal Separation","key":"ref27"},{"key":"ref3","doi-asserted-by":"crossref","DOI":"10.21105\/joss.01667","article-title":"Open-unmix - a reference implementation for music source separation","author":"st\u00f6ter","year":"2019","journal-title":"Journal of Open Source Software"},{"doi-asserted-by":"publisher","key":"ref6","DOI":"10.1109\/ICASSP.2019.8683555"},{"year":"2020","author":"manilow","journal-title":"Open Source Tools & Data for Music Source Separation","key":"ref29"},{"doi-asserted-by":"publisher","key":"ref5","DOI":"10.21105\/joss.02154"},{"key":"ref8","article-title":"Voice and accompaniment separation in music using self-attention convolutional neural network","volume":"abs 2003 8954","author":"liu","year":"2020","journal-title":"ArXiv"},{"key":"ref7","doi-asserted-by":"crossref","DOI":"10.1109\/MMSP48831.2020.9287108","article-title":"Multi-channel u-net for music source separation","author":"kadandale","year":"2020"},{"doi-asserted-by":"publisher","key":"ref2","DOI":"10.1109\/IWAENC.2018.8521383"},{"key":"ref9","article-title":"Demucs: Deep extractor for music sources with extra unlabeled data remixed","volume":"abs 1909 1174","author":"d\u00e9fossez","year":"2019","journal-title":"ArXiv"},{"doi-asserted-by":"publisher","key":"ref1","DOI":"10.1109\/WASPAA.2017.8169987"},{"key":"ref20","article-title":"Unsupervised interpretable representation learning for singing voice separation","volume":"abs 2003 1567","author":"mimilakis","year":"2020","journal-title":"ArXiv"},{"key":"ref22","article-title":"Bootstrapping deep music separation from primitive auditory grouping principles","volume":"abs 1910 11133","author":"seetharaman","year":"2019","journal-title":"ArXiv"},{"key":"ref21","article-title":"Revisiting representation learning for singing voice separation with sinkhorn distances","volume":"abs 2007 2780","author":"mimilakis","year":"2020","journal-title":"ArXiv"},{"doi-asserted-by":"publisher","key":"ref24","DOI":"10.21437\/Interspeech.2020-1470"},{"key":"ref23","article-title":"Self-training with noisy student improves imagenet classification","volume":"abs 1911 4252","author":"xie","year":"2019","journal-title":"ArXiv"},{"key":"ref26","article-title":"Poconet: Better speech enhancement with frequency-positional embeddings, semi-supervised conversational data, and biased loss","author":"isik","year":"2020","journal-title":"Proceedings of the Annual Conference of the International Speech Communication Association INTERSPEECH"},{"key":"ref25","article-title":"Finding strength in weakness: Learning to separate sounds with weak supervision","volume":"abs 1911 2182","author":"pishdadian","year":"2019","journal-title":"ArXiv"}],"event":{"name":"ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2021,6,6]]},"location":"Toronto, ON, Canada","end":{"date-parts":[[2021,6,11]]}},"container-title":["ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9413349\/9413350\/09413723.pdf?arnumber=9413723","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,12,27]],"date-time":"2022-12-27T08:29:12Z","timestamp":1672129752000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9413723\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,6,6]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/icassp39728.2021.9413723","relation":{},"subject":[],"published":{"date-parts":[[2021,6,6]]}}}