{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:29:22Z","timestamp":1775230162478,"version":"3.50.1"},"reference-count":31,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9054721","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T20:21:13Z","timestamp":1586463673000},"page":"7264-7268","source":"Crossref","is-referenced-by-count":29,"title":["An Empirical Study of Conv-Tasnet"],"prefix":"10.1109","author":[{"given":"Berkan","family":"Kadioglu","sequence":"first","affiliation":[]},{"given":"Michael","family":"Horgan","sequence":"additional","affiliation":[]},{"given":"Xiaoyu","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Jordi","family":"Pons","sequence":"additional","affiliation":[]},{"given":"Dan","family":"Darcy","sequence":"additional","affiliation":[]},{"given":"Vivek","family":"Kumar","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref31","article-title":"CSTR VCTK corpus: English multi-speaker corpus for CSTR voice cloning toolkit","author":"veaux","year":"2017","journal-title":"Centre for Speech Technology Research University of Edinburgh"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2441"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1373"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1292"},{"key":"ref12","first-page":"334","article-title":"Wave-u-net: A multi-scale neural network for end-to-end audio source separation","author":"stoller","year":"2018","journal-title":"ISMIR"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1177"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.23919\/EUSIPCO.2018.8553571"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2181"},{"key":"ref16","article-title":"Endto-end multi-channel speech separation","author":"gu","year":"2019"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-3181"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683634"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-49409-8_7"},{"key":"ref28","doi-asserted-by":"crossref","first-page":"28","DOI":"10.21437\/Interspeech.2009-5","article-title":"Feature extraction for robust speech recognition using a power-law nonlinearity and power-bias subtraction","author":"kim","year":"2009","journal-title":"InterSpeech"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TASSP.1984.1164317"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2941148"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1037\/h0046162"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682834"},{"key":"ref29","article-title":"Script to generate the multi-speaker dataset using wsj0","year":"0"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2019.2904183"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2915167"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462417"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7471631"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683800"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2017.2726762"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682912"},{"key":"ref22","article-title":"Sample-level deep convolutional neural networks for music auto-tagging using raw waveforms","author":"lee","year":"2017","journal-title":"Sound and Music Computing Conference"},{"key":"ref21","first-page":"637","article-title":"End-to-end learning for music audio tagging at scale","author":"pons","year":"2018","journal-title":"ISMIR"},{"key":"ref24","article-title":"Wavenet: A generative model for raw audio","author":"den oord","year":"2016"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461975"},{"key":"ref26","first-page":"626","article-title":"Sdr&#x2013;half-baked or well done?","author":"le roux","year":"2019","journal-title":"ICASSP"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461819"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Barcelona, Spain","start":{"date-parts":[[2020,5,4]]},"end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09054721.pdf?arnumber=9054721","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,29]],"date-time":"2023-09-29T19:27:24Z","timestamp":1696015644000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9054721\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":31,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9054721","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}