{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T14:30:14Z","timestamp":1774449014173,"version":"3.50.1"},"reference-count":66,"publisher":"IEEE","license":[{"start":{"date-parts":[[2021,12,13]],"date-time":"2021-12-13T00:00:00Z","timestamp":1639353600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2021,12,13]],"date-time":"2021-12-13T00:00:00Z","timestamp":1639353600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/100000001","name":"National Science Foundation","doi-asserted-by":"publisher","award":["ACI-1548562,ACI-1445606"],"award-info":[{"award-number":["ACI-1548562,ACI-1445606"]}],"id":[{"id":"10.13039\/100000001","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2021,12,13]]},"DOI":"10.1109\/asru51503.2021.9688157","type":"proceedings-article","created":{"date-parts":[[2022,2,3]],"date-time":"2022-02-03T20:31:00Z","timestamp":1643920260000},"page":"47-54","source":"Crossref","is-referenced-by-count":34,"title":["A Comparative Study on Non-Autoregressive Modelings for Speech-to-Text Generation"],"prefix":"10.1109","author":[{"given":"Yosuke","family":"Higuchi","sequence":"first","affiliation":[{"name":"Waseda University"}]},{"given":"Nanxin","family":"Chen","sequence":"additional","affiliation":[{"name":"Johns Hopkins University"}]},{"given":"Yuya","family":"Fujita","sequence":"additional","affiliation":[{"name":"Yahoo Japan Corporation"}]},{"given":"Hirofumi","family":"Inaguma","sequence":"additional","affiliation":[{"name":"Kyoto University"}]},{"given":"Tatsuya","family":"Komatsu","sequence":"additional","affiliation":[{"name":"LINE 
Corporation"}]},{"given":"Jaesong","family":"Lee","sequence":"additional","affiliation":[{"name":"Naver Corporation"}]},{"given":"Jumon","family":"Nozaki","sequence":"additional","affiliation":[{"name":"Kyoto University"}]},{"given":"Tianzi","family":"Wang","sequence":"additional","affiliation":[{"name":"Johns Hopkins University"}]},{"given":"Shinji","family":"Watanabe","sequence":"additional","affiliation":[{"name":"Carnegie Mellon University"}]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688238"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1456"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1906"},{"key":"ref32","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"devlin","year":"0","journal-title":"Proc NAACLHLT"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414198"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1437"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054250"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-911"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414594"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.29007\/3b2l"},{"key":"ref60","first-page":"5998","article-title":"Attention is all you need","author":"ashish","year":"0","journal-title":"Proc NeurIPS"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9415093"},{"key":"ref61","first-page":"523","article-title":"Towards better decoding and language model integration in sequence to sequence models","author":"chorowski","year":"0","journal-title":"Proc INTERSPEECH"},{"key":"ref63","article-title":"Improved speech-to-text translation with the Fisher and Callhome Spanish-English speech translation 
corpus","author":"post","year":"0","journal-title":"Proc IWSLT"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1633"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D16-1139"},{"key":"ref27","first-page":"11181","article-title":"Levenshtein Transformer","author":"gu","year":"0","journal-title":"Proc NeurIPS"},{"key":"ref65","doi-asserted-by":"crossref","first-page":"62","DOI":"10.1109\/MCSE.2014.80","article-title":"XSEDE: Accelerating scientific discovery","volume":"16","author":"john","year":"2014","journal-title":"Computing in Science & Engineering"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1145\/2792745.2792775"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.83"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638947"},{"key":"ref1","article-title":"Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups","volume":"29","author":"geoffrey","year":"2012","journal-title":"IEEE Signal Process Mag"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1600"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2086"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1619"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1336"},{"key":"ref23","first-page":"1920","article-title":"AlignRefine: Non-autoregressive speech recognition via iterative re-alignment","author":"chi","year":"0","journal-title":"Proc NAACLHLT"},{"key":"ref26","first-page":"5976","article-title":"Insertion Transformer: Flexible sequence generation via insertion operations","author":"stern","year":"0","journal-title":"Proc 
ICML"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-1149"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref51","first-page":"3935","article-title":"Enhancing the TED-LIUM corpus with selected data for language modeling and more TED talks","author":"rousseau","year":"0","journal-title":"Proc of LREC"},{"key":"ref59","article-title":"Adam: A method for stochastic optimization","author":"kingma","year":"0","journal-title":"Proc ICLR"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2680"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-711"},{"key":"ref56","article-title":"The Kaldi speech recognition toolkit","author":"povey","year":"0","journal-title":"Proc ASRU"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.3115\/1075527.1075614"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1016\/j.csl.2016.11.005"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1007"},{"key":"ref52","first-page":"1","article-title":"Spontaneous speech corpus of Japanese","author":"maekawa","year":"0","journal-title":"Proc LREC"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053889"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref40","first-page":"206","article-title":"Exploring neural transducers for end-to-end speech recognition","author":"battenberg","year":"0","journal-title":"Proc ASRU"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462105"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1780"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003750"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref16","article-title":"Non-autoregressive neural machine 
translation","author":"jiatao","year":"0","journal-title":"Proc ICLR"},{"key":"ref17","article-title":"Non-autoregressive transformer for speech recognition","author":"chen","year":"2020","journal-title":"IEEE Signal Process Lett"},{"key":"ref18","first-page":"1403","article-title":"Imputer: Sequence modelling via imputation and dynamic programming","author":"chan","year":"0","journal-title":"Proc ICML"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2404"},{"key":"ref4","first-page":"577","article-title":"Attention-based models for speech recognition","author":"chorowski","year":"0","journal-title":"Proc NeurIPS"},{"key":"ref3","first-page":"1764","article-title":"Towards end-to-end speech recognition with recurrent neural networks","author":"graves","year":"0","journal-title":"Proc ICML"},{"key":"ref6","article-title":"Sequence transduction with recurrent neural networks","author":"graves","year":"2012","journal-title":"ArXiv Preprint"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472621"},{"key":"ref8","article-title":"Neural machine translation by jointly learning to align and translate","author":"bahdanau","year":"0","journal-title":"Proc ICLR"},{"key":"ref7","first-page":"3104","article-title":"Sequence to sequence learning with neural networks","author":"sutskever","year":"0","journal-title":"Proc NeurIPS"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414858"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462506"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/DSLW51110.2021.9523402"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1145\/1390156.1390294"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1938"},{"key":"ref47","doi-asserted-by":"crossref","first-page":"1240","DOI":"10.1109\/JSTSP.2017.2763455","article-title":"Hybrid CTC\/attention architecture for end-to-end speech 
recognition","volume":"11","author":"shinji","year":"2017","journal-title":"IEEE Journal of Selected Topics in Signal Processing"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-337"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052964"},{"key":"ref44","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","author":"baevski","year":"0","journal-title":"Proc NeurIPS"},{"key":"ref43","article-title":"Citrinet: Closing the gap between non-autoregressive and autoregressive end-to-end models for automatic speech recognition","author":"majumdar","year":"2021","journal-title":"ArXiv Preprint"}],"event":{"name":"2021 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Cartagena, Colombia","start":{"date-parts":[[2021,12,13]]},"end":{"date-parts":[[2021,12,17]]}},"container-title":["2021 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9687821\/9687855\/09688157.pdf?arnumber=9688157","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,5,16]],"date-time":"2022-05-16T20:41:17Z","timestamp":1652733677000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9688157\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2021,12,13]]},"references-count":66,"URL":"https:\/\/doi.org\/10.1109\/asru51503.2021.9688157","relation":{},"subject":[],"published":{"date-parts":[[2021,12,13]]}}}