{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,14]],"date-time":"2025-10-14T01:09:20Z","timestamp":1760404160605},"reference-count":25,"publisher":"IEEE","license":[{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2019,5,1]],"date-time":"2019-05-01T00:00:00Z","timestamp":1556668800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2019,5]]},"DOI":"10.1109\/icassp.2019.8683297","type":"proceedings-article","created":{"date-parts":[[2019,4,16]],"date-time":"2019-04-16T20:07:22Z","timestamp":1555445242000},"page":"7080-7084","source":"Crossref","is-referenced-by-count":12,"title":["End-to-end Speech Recognition Using a High Rank LSTM-CTC Based Model"],"prefix":"10.1109","author":[{"given":"Yangyang","family":"Shi","sequence":"first","affiliation":[]},{"given":"Mei-Yuh","family":"Hwang","sequence":"additional","affiliation":[]},{"given":"Xin","family":"Lei","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2016.7472621"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2017-1705"},{"key":"ref12","article-title":"EESEN: End-to-end speech recognition using deep RNN models and WFST-based decoding","author":"miao","year":"2016","journal-title":"Proceedings of ASRU"},{"key":"ref13","article-title":"Improving the performance of online neural transducer models","author":"sainath","year":"2017","journal-title":"CoRR"},{"article-title":"DeepSpeech: Scaling up end-to-end speech recognition","year":"2014","author":"hannun","key":"ref14"},{"key":"ref15","article-title":"Deep Speech 2: end-to-end speech recognition in English and Mandarin","author":"amodei","year":"2015","journal-title":"CoRR"},{"key":"ref16","first-page":"577","article-title":"Attention-based models for speech recognition","author":"chorowski","year":"2015","journal-title":"Proceedings of NIPS"},{"key":"ref17","article-title":"Sequence transduction with recurrent neural networks","author":"graves","year":"2012","journal-title":"CoRR"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1616"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P17-1048"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953075"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2018.8461558"},{"key":"ref6","article-title":"Towards end-to-end speech recognition with recurrent neural networks","author":"graves","year":"2014","journal-title":"JMLR Workshop and Conference Proceedings"},{"key":"ref5","article-title":"Improved training for online end-to-end speech recognition systems","author":"kim","year":"2017","journal-title":"CoRR"},{"key":"ref8","article-title":"State-of-the-art speech recognition with sequence-to-sequence models","author":"chiu","year":"2017","journal-title":"CoRR"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1145\/1143844.1143891"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7953159"},{"key":"ref9","article-title":"Exploring neural transducers for end-to-end speech recognition","author":"battenberg","year":"2018","journal-title":"Proceedings of ASRU"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2012.2205597"},{"key":"ref20","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2015-350","article-title":"Fast and accurate recurrent neural network acoustic models for speech recognition","author":"sak","year":"2015"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.3115\/1075527.1075614"},{"key":"ref21","first-page":"1","article-title":"Breaking the softmax bottleneck: A high-rank RNN language model","author":"yang","year":"2017","journal-title":"CoRR"},{"key":"ref24","article-title":"Improving end-to-end speech recognition with policy learning","author":"zhou","year":"2017","journal-title":"Proceedings of ICASSP"},{"key":"ref23","article-title":"Librispeech: An ASR corpus based on public domain audio books","author":"panayotov","year":"2015","journal-title":"Proceedings of ICASSP"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1456"}],"event":{"name":"ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","start":{"date-parts":[[2019,5,12]]},"location":"Brighton, United Kingdom","end":{"date-parts":[[2019,5,17]]}},"container-title":["ICASSP 2019 - 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/8671773\/8682151\/08683297.pdf?arnumber=8683297","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,15]],"date-time":"2023-09-15T20:16:39Z","timestamp":1694808999000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/8683297\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,5]]},"references-count":25,"URL":"https:\/\/doi.org\/10.1109\/icassp.2019.8683297","relation":{},"subject":[],"published":{"date-parts":[[2019,5]]}}}