{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,26]],"date-time":"2026-02-26T23:35:34Z","timestamp":1772148934573,"version":"3.50.1"},"reference-count":58,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Tencent AI Lab Rhino-Bird Focused Research Program","award":["RBFR2022004"],"award-info":[{"award-number":["RBFR2022004"]}]},{"name":"Shenzhen Science & Technology Research Program","award":["GXWD20201231165807007-20200814115301001"],"award-info":[{"award-number":["GXWD20201231165807007-20200814115301001"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE\/ACM Trans. Audio Speech Lang. Process."],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/taslp.2022.3198555","type":"journal-article","created":{"date-parts":[[2022,8,19]],"date-time":"2022-08-19T19:36:31Z","timestamp":1660937791000},"page":"25-38","source":"Crossref","is-referenced-by-count":10,"title":["Integrating Lattice-Free MMI Into End-to-End Speech Recognition"],"prefix":"10.1109","volume":"31","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2129-471X","authenticated-orcid":false,"given":"Jinchuan","family":"Tian","sequence":"first","affiliation":[{"name":"Tencent AI Lab, Bellevue, WA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2449-1436","authenticated-orcid":false,"given":"Jianwei","family":"Yu","sequence":"additional","affiliation":[{"name":"Tencent AI Lab, Bellevue, WA, USA"}]},{"given":"Chao","family":"Weng","sequence":"additional","affiliation":[{"name":"Tencent AI Lab, Bellevue, WA, USA"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9999-6140","authenticated-orcid":false,"given":"Yuexian","family":"Zou","sequence":"additional","affiliation":[{"name":"Advanced Data and Signal Processing Laboratory, School of Electric and Computer Science, Peking University Shenzhen Graduate School, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0520-6844","authenticated-orcid":false,"given":"Dong","family":"Yu","sequence":"additional","affiliation":[{"name":"Tencent AI Lab, Bellevue, WA, USA"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414858"},{"key":"ref57","article-title":"Differentiable weighted finite-state transducers","author":"hannun","year":"2020"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.15622\/sp.58.4"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1016\/B978-0-08-051584-7.50027-9"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1983"},{"key":"ref58","article-title":"SpeechBrain: A general-purpose speech toolkit","author":"ravanelli","year":"2021"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1456"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2022.3154241"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1080\/02564602.2015.1010611"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688056"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-020-10073-7"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/DSLW51110.2021.9523402"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2013-548"},{"key":"ref16","first-page":"i-105-i-108","article-title":"Minimum phone error and I-smoothing for improved discriminative training","author":"povey","year":"0","journal-title":"Proc IEEE Int Conf Acoust Speech Signal Process"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2009-17"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2007.366914"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2680"},{"key":"ref50","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"2014"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICSDA.2017.8384449"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053040"},{"key":"ref48","first-page":"448","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","author":"ioffe","year":"0","journal-title":"Proc Int Conf Mach Learn"},{"key":"ref47","article-title":"AISHELL-2: Transforming mandarin asr research into industrial scale","author":"du","year":"2018"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2010.5495100"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2011.03.001"},{"key":"ref44","article-title":"Model-based approaches to robust speech recognition in diverse environments","author":"wang","year":"2015","journal-title":"Ph D Dissertation"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.coling-main.398"},{"key":"ref49","first-page":"3","article-title":"Group normalization","author":"wu","year":"0","journal-title":"Proc Eur Conf Comput Vis"},{"key":"ref8","article-title":"Sequence transduction with recurrent neural networks","author":"graves","year":"2012"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2017.2763455"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1705"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461404"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414560"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472621"},{"key":"ref5","article-title":"Monotonic chunkwise attention","author":"chiu*","year":"0","journal-title":"Proc Int Conf Learn Representations"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-02562-4"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-2075"},{"key":"ref34","article-title":"On minimum word error rate training of the hybrid autoregressive transducer","author":"lu","year":"2020"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054235"},{"key":"ref36","article-title":"Improving rare word recognition with LM-aware MWER training","author":"wang","year":"2022"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461809"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1030"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1221"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1557"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref1","first-page":"6000","article-title":"Attention is all you need","author":"vaswani","year":"0","journal-title":"Proc 31st Int Conf Neural Inf Process Syst"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9746579"},{"key":"ref38","article-title":"Input length matters: An empirical study of RNN-T and MWER training for long-form telephony speech recognition","author":"lu","year":"2021"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3053"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1423"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639684"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2008.4518545"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2006-603"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-595"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2009.4960445"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2017.8268947"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462331"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2675"}],"container-title":["IEEE\/ACM Transactions on Audio, Speech, and Language Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/6570655\/9970249\/09855847.pdf?arnumber=9855847","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,2,21]],"date-time":"2023-02-21T22:17:57Z","timestamp":1677017877000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9855847\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":58,"URL":"https:\/\/doi.org\/10.1109\/taslp.2022.3198555","relation":{},"ISSN":["2329-9290","2329-9304"],"issn-type":[{"value":"2329-9290","type":"print"},{"value":"2329-9304","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]}}}