{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,29]],"date-time":"2025-11-29T07:58:48Z","timestamp":1764403128018,"version":"3.37.3"},"reference-count":49,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,13]],"date-time":"2021-12-13T00:00:00Z","timestamp":1639353600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,13]],"date-time":"2021-12-13T00:00:00Z","timestamp":1639353600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001659","name":"Deutsche Forschungsgemeinschaft","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001659","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12,13]]},"DOI":"10.1109\/asru51503.2021.9688052","type":"proceedings-article","created":{"date-parts":[[2022,2,3]],"date-time":"2022-02-03T20:31:00Z","timestamp":1643920260000},"page":"717-724","source":"Crossref","is-referenced-by-count":8,"title":["Target Language Extraction at Multilingual Cocktail Parties"],"prefix":"10.1109","author":[{"given":"Marvin","family":"Borsdorf","sequence":"first","affiliation":[{"name":"University of Bremen,Machine Listening Lab (MLL),Germany"}]},{"given":"Haizhou","family":"Li","sequence":"additional","affiliation":[{"name":"National University of Singapore,Department of Electrical and Computer Engineering,Singapore"}]},{"given":"Tanja","family":"Schultz","sequence":"additional","affiliation":[{"name":"University of Bremen,Cognitive Systems Lab (CSL),Germany"}]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414092"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2210"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2020.2987429"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1436"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1193"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1101"},{"key":"ref37","first-page":"691","article-title":"Improving Speaker Dis-crimination of Target Speech Extraction With Time-Domain SpeakerBeam","author":"delcroix","year":"0","journal-title":"Proc ICASSP"},{"key":"ref36","first-page":"1421","article-title":"X-TaSNet: Robust and Ac-curate Time-Domain Speaker Extraction Network","author":"zhang","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413359"},{"key":"ref34","first-page":"1406","article-title":"SpEx+: A Complete Time Domain Speaker Extraction Net-work","author":"ge","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1410"},{"key":"ref27","doi-asserted-by":"crossref","first-page":"800","DOI":"10.1109\/JSTSP.2019.2922820","article-title":"SpeakerBeam: Speaker Aware Neural Network for Target Speaker Extraction in Speech Mixtures","volume":"13","author":"\u017emol\u00edkov\u00e1","year":"2019","journal-title":"IEEE Journal of Selected Topics in Sig-nal Processing"},{"key":"ref29","first-page":"6990","article-title":"Optimization of Speaker Extraction Neural Network with Magnitude and Tem-poral Spectrum Approximation Loss","author":"xu","year":"0","journal-title":"Proc ICASSP"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7471631"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1121\/1.1907229"},{"key":"ref20","first-page":"3730","article-title":"Single Chan-nel Voice Separation for Unknown Number of Speakers Under Reverberant and Noisy Settings","author":"chazan","year":"0","journal-title":"Proc ICASSP"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2018\/605"},{"key":"ref21","first-page":"5064","article-title":"Lis-tening to Each Speaker One by One with Recurrent Selective Hearing Networks","author":"kinoshita","year":"0","journal-title":"Proc ICASSP"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2519"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1550"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683087"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462661"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413818"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2205"},{"key":"ref40","first-page":"345","article-title":"GlobalPhone: A Multilingual Speech and Text Database Developed at Karlsruhe University","author":"schultz","year":"0","journal-title":"Proc ICSLP"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/SLT48900.2021.9383464"},{"key":"ref13","first-page":"5759","article-title":"Sandglas-set: A Light Multi-Granularity Self-Attentive Network for Time-Domain Speech Separation","author":"lam","year":"0","journal-title":"Proc ICASSP"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2017.2726762"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2018.2795749"},{"key":"ref16","first-page":"2622","article-title":"Separating Varying Numbers of Sources with Auxiliary Autoencoding Loss","author":"luo","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414774"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414677"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414205"},{"key":"ref4","first-page":"696","article-title":"TasNet: Time-Domain Audio Sep-aration Network for Real-Time, Single-Channel Speech Sepa-ration","author":"luo","year":"0","journal-title":"Proc ICASSP"},{"key":"ref3","first-page":"241","article-title":"Permutation Invari-ant Training of Deep Models for Speaker-Independent Multi-talker Speech Separation","author":"yu","year":"0","journal-title":"Proc ICASSP"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054266"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2915167"},{"key":"ref8","article-title":"Wavesplit: End-to-End Speech Separation by Speaker Clustering","author":"zeghidour","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054172"},{"key":"ref49","first-page":"626","article-title":"SDR - Half-Baked or Well Done?","author":"le roux","year":"0","journal-title":"Proc ICASSP"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413901"},{"key":"ref46","first-page":"2637","article-title":"Asteroid: The PyTorch-based Audio Source Sep-aration Toolkit for Researchers","author":"pariente","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952155"},{"key":"ref48","article-title":"Adam: A Method for Stochastic Optimization","author":"kingma","year":"0","journal-title":"Proc ICLR"},{"key":"ref47","article-title":"SpeechBrain: A General-Purpose Speech Toolkit","author":"ravanelli","year":"2021","journal-title":"ArXiv Preprint"},{"journal-title":"European Language Resources Association (ELRA)","year":"2020","key":"ref42"},{"key":"ref41","first-page":"8126","article-title":"GlobalPhone: A Mul-tilingual Text & Speech Database in 20 Languages","author":"schultz","year":"0","journal-title":"Proc ICASSP"},{"key":"ref44","first-page":"545","article-title":"Single-Channel Multi-Speaker Separation using Deep Clus-tering","author":"isik","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref43","article-title":"Scripts to Create wsj0&#x2013;2 Speaker Mixtures","author":"isik","year":"0","journal-title":"MERL Research"}],"event":{"name":"2021 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","start":{"date-parts":[[2021,12,13]]},"location":"Cartagena, Colombia","end":{"date-parts":[[2021,12,17]]}},"container-title":["2021 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9687821\/9687855\/09688052.pdf?arnumber=9688052","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T20:41:12Z","timestamp":1652733672000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9688052\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,13]]},"references-count":49,"URL":"https:\/\/doi.org\/10.1109\/asru51503.2021.9688052","relation":{},"subject":[],"published":{"date-parts":[[2021,12,13]]}}}