{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,5]],"date-time":"2025-11-05T06:56:58Z","timestamp":1762325818912},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,12,16]],"date-time":"2023-12-16T00:00:00Z","timestamp":1702684800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,12,16]],"date-time":"2023-12-16T00:00:00Z","timestamp":1702684800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,12,16]]},"DOI":"10.1109\/asru57964.2023.10389625","type":"proceedings-article","created":{"date-parts":[[2024,1,19]],"date-time":"2024-01-19T18:38:40Z","timestamp":1705689520000},"page":"1-8","source":"Crossref","is-referenced-by-count":5,"title":["The Second Multi-Channel Multi-Party Meeting Transcription Challenge (M2MeT 2.0): A Benchmark for Speaker-Attributed ASR"],"prefix":"10.1109","author":[{"given":"Yuhao","family":"Liang","sequence":"first","affiliation":[{"name":"Northwestern Polytechnical University,Audio, Speech and Language Processing Group (ASLP&#x0040;NPU), School of Computer Science,China"}]},{"given":"Mohan","family":"Shi","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China (USTC),NERC-SLIP,China"}]},{"given":"Fan","family":"Yu","sequence":"additional","affiliation":[{"name":"Alibaba Group,Speech Lab of DAMO Academy,China"}]},{"given":"Yangze","family":"Li","sequence":"additional","affiliation":[{"name":"Northwestern Polytechnical University,Audio, Speech and Language Processing Group (ASLP&#x0040;NPU), School of Computer Science,China"}]},{"given":"Shiliang","family":"Zhang","sequence":"additional","affiliation":[{"name":"Alibaba Group,Speech Lab of DAMO Academy,China"}]},{"given":"Zhihao","family":"Du","sequence":"additional","affiliation":[{"name":"Alibaba Group,Speech Lab of DAMO Academy,China"}]},{"given":"Qian","family":"Chen","sequence":"additional","affiliation":[{"name":"Alibaba Group,Speech Lab of DAMO Academy,China"}]},{"given":"Lei","family":"Xie","sequence":"additional","affiliation":[{"name":"Northwestern Polytechnical University,Audio, Speech and Language Processing Group (ASLP&#x0040;NPU), School of Computer Science,China"}]},{"given":"Yanmin","family":"Qian","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,SpeechLab, Department of Computer Science and Engineering,China"}]},{"given":"Jian","family":"Wu","sequence":"additional","affiliation":[{"name":"Singapore Institute of Technology,ICT Cluster,Singapore"}]},{"given":"Zhuo","family":"Chen","sequence":"additional","affiliation":[{"name":"Singapore Institute of Technology,ICT Cluster,Singapore"}]},{"given":"Kong Aik","family":"Lee","sequence":"additional","affiliation":[{"name":"Singapore Institute of Technology,ICT Cluster,Singapore"}]},{"given":"Zhijie","family":"Yan","sequence":"additional","affiliation":[{"name":"Alibaba Group,Speech Lab of DAMO Academy,China"}]},{"given":"Hui","family":"Bu","sequence":"additional","affiliation":[{"name":"Beijing Shell Shell Technology Co., 
Ltd.,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746465"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746270"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/CHiME.2020-1"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746683"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1085"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2021.101254"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003959"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-1101"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054683"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747765"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/SLT54892.2023.10023174"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-1602"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/icassp49357.2023.10095185"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747019"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2020-3015"},{"key":"ref16","first-page":"17627","article-title":"Branchformer: Parallel mlp-attention architectures to capture local and global context for speech recognition and understanding","volume-title":"Proc. ICML","author":"Peng"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-9996"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2021-101"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11210"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/11677482_32"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/11965152_28"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-68585-2_36"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7471631"},{"journal-title":"arxiv preprint arxiv:2005.11262","article-title":"Librimix: An open-source dataset for generalizable speech separation","author":"Cosentino","key":"ref24"},{"key":"ref25","first-page":"100","article-title":"The AMI meeting corpus","volume-title":"Proc. 
ICMT","author":"McCowan"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053426"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1397"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1397"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054017"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1428"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.505"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr.2016.90"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1513"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-517"},{"journal-title":"arXiv preprint arXiv:2106.05642","article-title":"U2++: unified two-pass bidirectional end-to-end model for speech recognition","author":"Wu","key":"ref35"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/slt54892.2023.10022715"},{"key":"ref37","doi-asserted-by":"crossref","DOI":"10.1109\/ASRU57964.2023.10389654","article-title":"Pp-met: a real-world personalized prompt based meeting transcription system","volume-title":"arxiv preprint arXiv:2309.16247","author":"Lyu"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11425"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2019-2680"}],"event":{"name":"2023 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","start":{"date-parts":[[2023,12,16]]},"location":"Taipei, Taiwan","end":{"date-parts":[[2023,12,20]]}},"container-title":["2023 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10388490\/10389614\/10389625.pdf?arnumber=10389625","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,23]],"date-time":"2024-01-23T16:39:55Z","timestamp":1706027995000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10389625\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,12,16]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/asru57964.2023.10389625","relation":{},"subject":[],"published":{"date-parts":[[2023,12,16]]}}}