{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,10]],"date-time":"2025-10-10T21:43:41Z","timestamp":1760132621089,"version":"3.28.0"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,6,6]],"date-time":"2021-06-06T00:00:00Z","timestamp":1622937600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,6,6]]},"DOI":"10.1109\/icassp39728.2021.9415062","type":"proceedings-article","created":{"date-parts":[[2021,5,13]],"date-time":"2021-05-13T19:53:45Z","timestamp":1620935625000},"page":"6503-6507","source":"Crossref","is-referenced-by-count":8,"title":["Minimum Bayes Risk Training for End-to-End Speaker-Attributed ASR"],"prefix":"10.1109","author":[{"given":"Naoyuki","family":"Kanda","sequence":"first","affiliation":[]},{"given":"Zhong","family":"Meng","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Lu","sequence":"additional","affiliation":[]},{"given":"Yashesh","family":"Gaur","sequence":"additional","affiliation":[]},{"given":"Xiaofei","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Zhuo","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Takuya","family":"Yoshioka","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1929"},{"key":"ref32","doi-asserted-by":"crossref","first-page":"2616","DOI":"10.21437\/Interspeech.2017-950","article-title":"Voxceleb: A large-scale speaker identification dataset","author":"nagrani","year":"2017","journal-title":"Proc INTERSPEECH"},{"key":"ref31","article-title":"The Kaldi speech recognition toolkit","author":"povey","year":"2011","journal-title":"ASRU"},{"key":"ref30","first-page":"5206","article-title":"Lib-rispeech: an ASR corpus based on public domain audio books","author":"panayotov","year":"2015","journal-title":"Proc ICASSP"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1007"},{"key":"ref36","first-page":"577","article-title":"Attention-based models for speech recognition","author":"chorowski","year":"2015","journal-title":"Proc NIPS"},{"key":"ref35","article-title":"Layer normalization","author":"ba","year":"2016","journal-title":"arXiv preprint arXiv 1607 06450"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003826"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1126"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003884"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682572"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1943"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3039"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9004009"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1085"},{"key":"ref17","article-title":"Investigation of end-to-end speaker-attributed ASR for continuous multi-talker recordings","author":"kanda","year":"2020","journal-title":"arXiv preprint arXiv 2008 06439"},{"key":"ref18","doi-asserted-by":"crossref","first-page":"2345","DOI":"10.21437\/Interspeech.2013-548","article-title":"Sequence-discriminative training of deep neural networks","volume":"2013","author":"veselý","year":"2013","journal-title":"Proc INTERSPEECH"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638951"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-0999"},{"key":"ref4","first-page":"276","article-title":"Advances in online audio-visual meeting transcription","author":"yoshioka","year":"2019","journal-title":"Proc ASRU"},{"key":"ref27","first-page":"4052","article-title":"Deep neural networks for small footprint text-dependent speaker verification","author":"variani","year":"2014","journal-title":"Proc ICASSP"},{"key":"ref3","first-page":"28","article-title":"The AMI meeting corpus: A pre-announcement","author":"carletta","year":"2005","journal-title":"5th International Workshop on Machine Learning for Multimodal Interaction"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952154"},{"key":"ref29","article-title":"Sequence transduction with recurrent neural networks","author":"graves","year":"2012","journal-title":"arXiv preprint arXiv 1211 3711"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7471631"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1244"},{"key":"ref7","doi-asserted-by":"crossref","first-page":"2456","DOI":"10.21437\/Interspeech.2017-305","article-title":"Recognizing multi-talker speech with permutation invariant training","author":"yu","year":"2017","journal-title":"Proc INTERSPEECH"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2003.1198793"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8682273"},{"key":"ref1","first-page":"373","article-title":"The rich transcription 2007 meeting recognition evaluation","author":"fiscus","year":"2007","journal-title":"Multimodal Technologies for Perception of Humans"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-79"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953079"},{"key":"ref21","first-page":"604","article-title":"Acoustic modelling with CD-CTC-SMBR LSTM RNNs","author":"sak","year":"2015","journal-title":"Proc ASRU"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1557"},{"key":"ref23","article-title":"Minimum Bayes risk training of RNN-Transducer for end-to-end speech recognition","author":"weng","year":"2019","journal-title":"arXiv preprint arXiv 1911 12487"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1030"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461809"}],"event":{"name":"ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2021,6,6]]},"location":"Toronto, ON, Canada","end":{"date-parts":[[2021,6,11]]}},"container-title":["ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9413349\/9413350\/09415062.pdf?arnumber=9415062","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,30]],"date-time":"2024-08-30T21:06:49Z","timestamp":1725052009000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9415062\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,6,6]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/icassp39728.2021.9415062","relation":{},"subject":[],"published":{"date-parts":[[2021,6,6]]}}}