{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T15:29:44Z","timestamp":1775230184451,"version":"3.50.1"},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2020,5,1]],"date-time":"2020-05-01T00:00:00Z","timestamp":1588291200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2020,5]]},"DOI":"10.1109\/icassp40776.2020.9054476","type":"proceedings-article","created":{"date-parts":[[2020,4,9]],"date-time":"2020-04-09T20:21:13Z","timestamp":1586463673000},"page":"6074-6078","source":"Crossref","is-referenced-by-count":114,"title":["Streaming Automatic Speech Recognition with the Transformer Model"],"prefix":"10.1109","author":[{"given":"Niko","family":"Moritz","sequence":"first","affiliation":[]},{"given":"Takaaki","family":"Hori","sequence":"additional","affiliation":[]},{"given":"Jonathan","family":"Le","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","article-title":"Improving the performance of online neural transducer models","author":"sainath","year":"2017"},{"key":"ref11","article-title":"Monotonic chunkwise attention","author":"chiu","year":"2018","journal-title":"Proc of the Int Conf on Learning Representations (ICLR)"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2019.8683510"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2017.2763455"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003920"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2837"},{"key":"ref16","first-page":"6000","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc NIPS"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003750"},{"key":"ref18","first-page":"949","article-title":"Advances in joint CTC-attention based end-to-end speech recognition with a deep CNN encoder and RNN-LM","author":"hori","year":"2017","journal-title":"Proc Inter-speech ISCA"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462497"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1938"},{"key":"ref6","article-title":"Neural machine translation by jointly learning to align and translate","author":"bahdanau","year":"2014"},{"key":"ref5","article-title":"Sequence transduction with recurrent neural networks","author":"graves","year":"2012"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU46091.2019.9003906"},{"key":"ref7","article-title":"An all-neural on-device speech recognizer","author":"schalkwyk","year":"2019"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2012.2205597"},{"key":"ref9","doi-asserted-by":"crossref","first-page":"939","DOI":"10.21437\/Interspeech.2017-233","article-title":"A comparison of sequence-to-sequence models for speech recognition","author":"prabhavalkar","year":"2017","journal-title":"Proc ISCA Interspeech"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2016-595"},{"key":"ref20","article-title":"First-pass large vocabulary continuous speech recognition using bidirectional recurrent DNNs","author":"maas","year":"2014"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref21","volume":"ldc94s13a","year":"1994","journal-title":"CSR-II (WSJ1) Complete"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D18-2012"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462506"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2680"}],"event":{"name":"ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Barcelona, Spain","start":{"date-parts":[[2020,5,4]]},"end":{"date-parts":[[2020,5,8]]}},"container-title":["ICASSP 2020 - 2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9040208\/9052899\/09054476.pdf?arnumber=9054476","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,6,28]],"date-time":"2022-06-28T00:15:08Z","timestamp":1656375308000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9054476\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2020,5]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/icassp40776.2020.9054476","relation":{},"subject":[],"published":{"date-parts":[[2020,5]]}}}