{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,27]],"date-time":"2025-07-27T07:47:59Z","timestamp":1753602479453},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,5,23]],"date-time":"2022-05-23T00:00:00Z","timestamp":1653264000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,5,23]]},"DOI":"10.1109\/icassp43922.2022.9747381","type":"proceedings-article","created":{"date-parts":[[2022,4,27]],"date-time":"2022-04-27T19:50:34Z","timestamp":1651089034000},"page":"9176-9180","source":"Crossref","is-referenced-by-count":5,"title":["The Volcspeech System for the ICASSP 2022 Multi-Channel Multi-Party Meeting Transcription Challenge"],"prefix":"10.1109","author":[{"given":"Chen","family":"Shen","sequence":"first","affiliation":[{"name":"Bytedance AI Lab"}]},{"given":"Yi","family":"Liu","sequence":"additional","affiliation":[{"name":"Bytedance AI Lab"}]},{"given":"Wenzhi","family":"Fan","sequence":"additional","affiliation":[{"name":"Bytedance AI Lab"}]},{"given":"Bin","family":"Wang","sequence":"additional","affiliation":[{"name":"Bytedance AI Lab"}]},{"given":"Shixue","family":"Wen","sequence":"additional","affiliation":[{"name":"Bytedance AI Lab"}]},{"given":"Yao","family":"Tian","sequence":"additional","affiliation":[{"name":"Bytedance AI Lab"}]},{"given":"Jun","family":"Zhang","sequence":"additional","affiliation":[{"name":"Bytedance AI Lab"}]},{"given":"Jingsheng","family":"Yang","sequence":"additional","affiliation":[{"name":"Bytedance AI Lab"}]},{"given":"Zejun","family":"Ma","sequence":"additional","affiliation":[{"name":"Bytedance AI Lab"}]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-941"},{"article-title":"Bayesian HMM clustering of x-vector sequences (VBx) in speaker diarization: theory, implementation and analysis on standard tasks","year":"2020","author":"landini","key":"ref12"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/3292500.3330701"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2019.2961071"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2015.2512042"},{"key":"ref16","first-page":"2472","article-title":"DCCRN: Deep complex convolution recurrent network for phase-aware speech enhancement","author":"hu","year":"2021","journal-title":"Proc INTERSPEECH ISCA"},{"article-title":"DOVER-Lap: A Method for Combining Overlap-aware Diarization Outputs","year":"2020","author":"raj","key":"ref17"},{"article-title":"The bytedance speaker diarization system for the voxceleb speaker recognition challenge 2021","year":"2021","author":"wang","key":"ref18"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953075"},{"journal-title":"Aishell-4 An open source dataset for speech enhancement separation recognition and speaker diarization in conference scenario","year":"2021","author":"fu","key":"ref4"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2680"},{"article-title":"CN-Celeb: multi-genre speaker recognition","year":"2020","author":"li","key":"ref3"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953152"},{"article-title":"Musan: A music, speech, and noise corpus","year":"2015","author":"snyder","key":"ref5"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413544"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/WASPAA52581.2021.9632780"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746270"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461623"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746465"},{"key":"ref20","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1002\/9780470714089"},{"key":"ref23","first-page":"2797","article-title":"Serialized output training for end-to-end overlapped speech recognition","author":"kanda","year":"2020","journal-title":"Proc INTERSPEECH ISCA"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1456"},{"article-title":"Mobilenets: Efficient convolutional neural networks for mobile vision applications","year":"2017","author":"howard","key":"ref25"}],"event":{"name":"ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2022,5,23]]},"location":"Singapore, Singapore","end":{"date-parts":[[2022,5,27]]}},"container-title":["ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9745891\/9746004\/09747381.pdf?arnumber=9747381","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,8,15]],"date-time":"2022-08-15T20:08:14Z","timestamp":1660594094000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9747381\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,5,23]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/icassp43922.2022.9747381","relation":{},"subject":[],"published":{"date-parts":[[2022,5,23]]}}}