{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:56:10Z","timestamp":1776887770517,"version":"3.51.2"},"reference-count":27,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,4]],"date-time":"2023-06-04T00:00:00Z","timestamp":1685836800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6,4]]},"DOI":"10.1109\/icassp49357.2023.10096154","type":"proceedings-article","created":{"date-parts":[[2023,5,5]],"date-time":"2023-05-05T17:28:30Z","timestamp":1683307710000},"page":"1-5","source":"Crossref","is-referenced-by-count":5,"title":["Fast-U2++: Fast and Accurate End-to-End Speech Recognition in Joint CTC\/Attention Frames"],"prefix":"10.1109","author":[{"given":"Chengdong","family":"Liang","sequence":"first","affiliation":[{"name":"Northwestern Polytechnical University,School of Marine Science and Technology,Xi&#x2019;an,China"}]},{"given":"Xiao-Lei","family":"Zhang","sequence":"additional","affiliation":[{"name":"Northwestern Polytechnical University,School of Marine Science and Technology,Xi&#x2019;an,China"}]},{"given":"BinBin","family":"Zhang","sequence":"additional","affiliation":[{"name":"Horizon Robotics,Beijing,China"}]},{"given":"Di","family":"Wu","sequence":"additional","affiliation":[{"name":"Horizon Robotics,Beijing,China"}]},{"given":"Shengqiang","family":"Li","sequence":"additional","affiliation":[{"name":"Horizon Robotics,Beijing,China"}]},{"given":"Xingchen","family":"Song","sequence":"additional","affiliation":[{"name":"Horizon Robotics,Beijing,China"}]},{"given":"Zhendong","family":"Peng","sequence":"additional","affiliation":[{"name":"Horizon Robotics,Beijing,China"}]},{"given":"Fuping","family":"Pan","sequence":"additional","affiliation":[{"name":"Horizon Robotics,Beijing,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3016"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054188"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref4","first-page":"173","article-title":"Deep speech 2: End-to-end speech recognition in english and mandarin","volume-title":"ICML.","author":"Amodei","year":"2016"},{"key":"ref5","article-title":"Sequence transduction with recurrent neural networks","author":"Graves","year":"2012"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638947"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472621"},{"key":"ref8","article-title":"Attention-based models for speech recognition","volume":"28","author":"Chorowski","year":"2015","journal-title":"Advances in neural information processing systems"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953075"},{"key":"ref10","article-title":"U2++: Unified two-pass bidirectional end-to-end model for speech 
recognition","author":"Wu","year":"2021"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8462506"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref13","article-title":"Dual-mode asr: Unify and improve streaming asr with full-context modeling","author":"Yu","year":"2021","journal-title":"ICLR"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414607"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1693"},{"key":"ref16","article-title":"Unified streaming and non-streaming two-pass end-to-end model for speech recognition","author":"Zhang","year":"2020"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1983"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-483"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1887"},{"issue":"7","key":"ref20","article-title":"Dis-tilling the knowledge in a neural network","volume":"2","author":"Hinton","year":"2015"},{"key":"ref21","article-title":"Transformer-transducer: End-to-end speech recognition with self-attention","author":"Yeh","year":"2019"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2680"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1898"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054260"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1972"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-1780"},{"key":"ref27","article-title":"Wnars: Wfst based non-autoregressive streaming end-to-end speech recognition","author":"Wang","year":"2021"}],"event":{"name":"ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Rhodes Island, Greece","start":{"date-parts":[[2023,6,4]]},"end":{"date-parts":[[2023,6,10]]}},"container-title":["ICASSP 2023 - 2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10094559\/10094560\/10096154.pdf?arnumber=10096154","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,29]],"date-time":"2024-02-29T22:47:35Z","timestamp":1709246855000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10096154\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6,4]]},"references-count":27,"URL":"https:\/\/doi.org\/10.1109\/icassp49357.2023.10096154","relation":{},"subject":[],"published":{"date-parts":[[2023,6,4]]}}}