{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:23:38Z","timestamp":1775229818671,"version":"3.50.1"},"reference-count":40,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9053426","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T20:21:13Z","timestamp":1586463673000},"page":"7284-7288","source":"Crossref","is-referenced-by-count":142,"title":["Continuous Speech Separation: Dataset and Analysis"],"prefix":"10.1109","author":[{"given":"Zhuo","family":"Chen","sequence":"first","affiliation":[]},{"given":"Takuya","family":"Yoshioka","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Tianyan","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Zhong","family":"Meng","sequence":"additional","affiliation":[]},{"given":"Yi","family":"Luo","sequence":"additional","affiliation":[]},{"given":"Jian","family":"Wu","sequence":"additional","affiliation":[]},{"given":"Xiong","family":"Xiao","sequence":"additional","affiliation":[]},{"given":"Jinyu","family":"Li","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-2284"},{"key":"ref33","article-title":"DiPCo&#x2013;dinner party corpus","author":"van segbroeck","year":"2019"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1768"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-3181"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2821"},{"key":"ref37","article-title":"PyKaldi2: yet another speech toolkit based on Kaldi and PyTorch","author":"lu","year":"2019"},{"key":"ref36","article-title":"The kaldi speech recognition toolkit","author":"povey","year":"2011","journal-title":"Proc ASRU"},{"key":"ref35","first-page":"28","article-title":"The AMI meeting corpus: a preannouncement","author":"carletta","year":"2006","journal-title":"Proc ICMI-MLMI 2006"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2003.1198793"},{"key":"ref10","doi-asserted-by":"crossref","DOI":"10.1007\/978-3-030-01246-5_35","article-title":"The sound of pixels","author":"zhao","year":"2018"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1121\/1.2799929"},{"key":"ref11","doi-asserted-by":"crossref","first-page":"2655","DOI":"10.21437\/Interspeech.2017-667","article-title":"Speaker-aware neural network based beamformer for speaker extraction in speech 
mixtures","author":"zmolikova","year":"2017","journal-title":"InterSpeech"},{"key":"ref12","article-title":"VoiceFilter: targeted voice separation by speaker-conditioned spectrogram masking","author":"wang","year":"2018"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682245"},{"key":"ref14","first-page":"558","article-title":"Multi-channel multi-speaker overlapped speech recognition with location guided speech extraction network","author":"chen","year":"2018","journal-title":"Proc SLT 2018"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461370"},{"key":"ref16","first-page":"53","article-title":"Two-stage deep learning for noisy-reverberant speech enhancement","volume":"27","author":"zhao","year":"2018","journal-title":"IEEE\/ACM TASLP"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461639"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462081"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462116"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9004009"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683874"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682572"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462507"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461939"},{"key":"ref29","first-page":"276","article-title":"Advances in online audiovisual meeting transcription","author":"yoshioka","year":"2019","journal-title":"Proc ASRU"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952155"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683138"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2892241"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952154"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7471631"},{"key":"ref9","doi-asserted-by":"crossref","DOI":"10.1145\/3197517.3201357","article-title":"Looking to listen at the cocktail party: A speaker-independent audio-visual model for speech separation","author":"ephrat","year":"2018"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2019.2915167"},{"key":"ref22","article-title":"BSS_EVAL toolbox user guide&#x2014;revision 2.0","author":"f\u00e9votte","year":"2005"},{"key":"ref21","article-title":"FurcaNeXt: End-to-end monaural speech separation with dynamic gated dilated temporal convolutional networks","author":"shi","year":"2019"},{"key":"ref24","article-title":"Dual-path RNN: efficient long sequence modeling for time-domain single-channel speech separation","author":"luo","year":"2019"},{"key":"ref23","first-page":"626","article-title":"SDR&#x2014;half-baked or well done?","author":"le roux","year":"2019","journal-title":"Proc of ICASSP 2019"},{"key":"ref26","first-page":"499","article-title":"Low-latency real-time meeting recognition and understanding using distant microphones and omnidirectional camera","volume":"20","author":"hori","year":"2012","journal-title":"IEEE TASLP"},{"key":"ref25","first-page":"293","article-title":"Analysis of overlaps in meetings by dialog factors, hot spots, speakers, and collection site: insights for automatic speech recognition","author":"\u00e7etin","year":"2006","journal-title":"Proc INTERSPEECH"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech 
and Signal Processing (ICASSP)","location":"Barcelona, Spain","start":{"date-parts":[[2020,5,4]]},"end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09053426.pdf?arnumber=9053426","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:07:16Z","timestamp":1656374836000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9053426\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":40,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9053426","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}