{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,29]],"date-time":"2026-04-29T18:04:55Z","timestamp":1777485895978,"version":"3.51.4"},"reference-count":30,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2023,11,8]],"date-time":"2023-11-08T00:00:00Z","timestamp":1699401600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,11,8]],"date-time":"2023-11-08T00:00:00Z","timestamp":1699401600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62071302"],"award-info":[{"award-number":["62071302"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J Speech Technol"],"published-print":{"date-parts":[[2023,12]]},"DOI":"10.1007\/s10772-023-10050-z","type":"journal-article","created":{"date-parts":[[2023,11,8]],"date-time":"2023-11-08T17:02:09Z","timestamp":1699462929000},"page":"895-902","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Boosting Character-based Mandarin ASR via Chinese Pinyin Representation"],"prefix":"10.1007","volume":"26","author":[{"given":"Li","family":"Li","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0924-408X","authenticated-orcid":false,"given":"Yanhua","family":"Long","sequence":"additional","affiliation":[]},{"given":"Dongxing","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Yijie","family":"Li","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,11,8]]},"reference":[{"key":"10050_CR1","doi-asserted-by":"crossref","unstructured":"Bahdanau, D., Chorowski, J., Serdyuk, D., et al. (2016). End-to-end attention-based large vocabulary speech recognition. In Proceeding of ICASSP, pp. 4945\u20134949","DOI":"10.1109\/ICASSP.2016.7472618"},{"key":"10050_CR2","doi-asserted-by":"crossref","unstructured":"Boyer, F., Shinohara, Y., Ishii, T., Inaguma, H., & Watanabe, S. (2021). A study of transducer based end-to-end ASR with ESPnet: Architecture, auxiliary loss and decoding strategies. In IEEE automatic speech recognition and understanding workshop (ASRU), pp. 16\u201323","DOI":"10.1109\/ASRU51503.2021.9688251"},{"key":"10050_CR3","doi-asserted-by":"crossref","unstructured":"Bu, H., Du, J., Na, X., Wu, B., & Zheng, H. (2017). AISHELL-1: An open-source Mandarin speech corpus and a speech recognition baseline. In Proceeding of O-COCOSDA, (pp. 1\u20135).","DOI":"10.1109\/ICSDA.2017.8384449"},{"key":"10050_CR4","doi-asserted-by":"crossref","unstructured":"Chan, W., Lane, I. (2016). On online attention-based speech recognition and joint Mandarin Character-Pinyin training. In Proceeding of Interspeech, (pp. 3404\u20133408).","DOI":"10.21437\/Interspeech.2016-334"},{"key":"10050_CR5","doi-asserted-by":"crossref","unstructured":"Chen, S., Hu, X., Li, S., & Xu, X. (2021). An investigation of using hybrid modeling units for improving end-to-end speech recognition system. In Proceeding of ICASSP, (pp. 6743\u20136747).","DOI":"10.1109\/ICASSP39728.2021.9414598"},{"key":"10050_CR6","doi-asserted-by":"crossref","unstructured":"Chiu, C.-C., Sainath, T. N., Wu, Y., et al. (2018). State-of-the-art speech recognition with sequence-to-sequence models. In Proceeding of ICASSP, (pp. 4774\u20134778).","DOI":"10.1109\/ICASSP.2018.8462105"},{"key":"10050_CR7","doi-asserted-by":"crossref","unstructured":"Graves, A. (2012). Sequence transduction with recurrent neural networks. In Proceeding of ICML","DOI":"10.1007\/978-3-642-24797-2"},{"key":"10050_CR8","doi-asserted-by":"crossref","unstructured":"Graves, A., Fernandez, S., Gomez, F., & Schmidhuber, J. (2006). Connectionist temporal classification: Labelling unsegmented sequence data with recurrent neural networks. In Proceeding of ICML, (pp. 369\u2013376).","DOI":"10.1145\/1143844.1143891"},{"key":"10050_CR9","doi-asserted-by":"crossref","unstructured":"Gulati, A., Qin, J., Chiu, C.-C., et al. (2020). Conformer: Convolution-augmented transformer for speech recognition. In Proceeding of Interspeech, (pp. 5036\u20135040).","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"10050_CR10","doi-asserted-by":"crossref","unstructured":"Huang, W., Hu, W., Yeung, Y. T., & Chen, X. (2020). Conv-Transformer Transducer: Low latency, low frame rate, streamable end-to-end speech recognition. In Proceeding of Interspeech, (pp. 5001\u20135005)","DOI":"10.21437\/Interspeech.2020-2361"},{"key":"10050_CR11","doi-asserted-by":"crossref","unstructured":"Jeon, J.-J., & Kim, E. (2021). Multitask learning and joint optimization for transformer-RNN-transducer speech recognition. In Proceeding of ICASSP, (pp. 6793\u20136797).","DOI":"10.1109\/ICASSP39728.2021.9414911"},{"key":"10050_CR12","doi-asserted-by":"crossref","unstructured":"Karita, S., Chen, N., Hayashi, T., et al. (2019). A comparative study on transformer vs RNN in speech applications. In Proceeding of ASRU, (pp. 449\u2013456).","DOI":"10.1109\/ASRU46091.2019.9003750"},{"key":"10050_CR13","doi-asserted-by":"crossref","unstructured":"Li, J., Wu, Y., Gaur, Y., Wang, C., Zhao, R., & Liu, S. (2020). On the comparison of popular end-to-end models for large scale speech recognition. In Proceeding of Interspeech, (pp. 1\u20135).","DOI":"10.21437\/Interspeech.2020-2846"},{"key":"10050_CR14","doi-asserted-by":"crossref","unstructured":"Li, J., Ye, G., Das, A., Zhao, R., & Gong, Y. (2018). Advancing acoustic-to-word CTC model. In Proceeding of ICASSP, (pp. 5794\u20135798).","DOI":"10.1109\/ICASSP.2018.8462017"},{"issue":"1","key":"10050_CR15","first-page":"8","volume":"11","author":"J Li","year":"2020","unstructured":"Li, J. (2020). Recent advances in end-to-end automatic speech recognition. APSIPA Transactions on Signal and Information Processing, 11(1), 8.","journal-title":"APSIPA Transactions on Signal and Information Processing"},{"key":"10050_CR16","doi-asserted-by":"crossref","unstructured":"Miao, H., Cheng, G., Gao, C., et al. (2020). Transformer-based online CTC\/attention end-to-end speech recognition architecture. In Proceeding of ICASSP, (pp. 6084\u20136088).","DOI":"10.1109\/ICASSP40776.2020.9053165"},{"key":"10050_CR17","doi-asserted-by":"crossref","unstructured":"Padi, B., Mohan, A., & Ganapathy, S. (2019). Attention based hybrid i-vector BLSTM model for language recognition. In Proceeding of Interspeech, (pp. 1263\u20131267).","DOI":"10.21437\/Interspeech.2019-2371"},{"key":"10050_CR18","doi-asserted-by":"crossref","unstructured":"Rao, K., Sak, H., & Prabhavalkar, R. (2017). Exploring architectures, data and units for streaming end-to-end speech recognition with RNN-transducer. In IEEE automatic speech recognition and understanding workshop (ASRU), (pp. 193\u2013199).","DOI":"10.1109\/ASRU.2017.8268935"},{"key":"10050_CR19","doi-asserted-by":"crossref","unstructured":"Sennrich, R., Haddow, B., & Birch, A. (2016). Neural machine translation of rare words with subword units. In Proceeding of ACL, (pp. 1715\u20131725).","DOI":"10.18653\/v1\/P16-1162"},{"key":"10050_CR20","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., & Wojna, Z. (2016). Rethinking the inception architecture for computer vision. In Proceeding of CVPR, (pp. 2818\u20132826).","DOI":"10.1109\/CVPR.2016.308"},{"key":"10050_CR21","doi-asserted-by":"crossref","unstructured":"Tanaka, T., Masumura, R., Moriya, T., et al. (2019). A joint end-to-end and DNN-HMM hybrid automatic speech recognition system with transferring sharable knowledge. In Proceeding of Interspeech, (pp. 2210\u20132214).","DOI":"10.21437\/Interspeech.2019-2263"},{"key":"10050_CR22","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., et al. (2017). Attention is all you need. In Proceeding of advances in neural information processing systems, (pp. 5998\u20136008)."},{"key":"10050_CR23","doi-asserted-by":"crossref","unstructured":"Wang, W., Wang, G., Bhatnagar, A., Zhou, Y., Xiong, C., & Socher, R. (2020). An investigation of phone-based subword units for end-to-end speech recognition. In Proceeding of Interspeech, (pp. 1778\u20131782).","DOI":"10.21437\/Interspeech.2020-1873"},{"key":"10050_CR24","doi-asserted-by":"crossref","unstructured":"Watanabe, S., Hori, T., Karita, S., et al. (2018). ESPnet: End-to-end speech processing toolkit. In Proceeding of Interspeech, (pp. 2207\u20132211)","DOI":"10.21437\/Interspeech.2018-1456"},{"key":"10050_CR25","doi-asserted-by":"crossref","unstructured":"Xiao, Z., Ou, Z., Chu, W., & Lin, H. (2018). Hybrid CTC-attention based end-to-end speech recognition using subword units. In Proceeding of ISCSLP, (pp. 146\u2013150).","DOI":"10.1109\/ISCSLP.2018.8706675"},{"key":"10050_CR26","unstructured":"Zeineldeen, M., Zeyer, A., Zhou, W., Ng, T., Schl\u00fcter, R., & Ney, H. (2020). A systematic comparison of grapheme-based vs. phoneme-based label units for encoder-decoder-attention models, arXiv preprint arXiv:2005.09336."},{"key":"10050_CR27","doi-asserted-by":"crossref","unstructured":"Zeyer, A., Irie, K., Schl\u00fcter, R., & Ney, H. (2018). Improved training of end-to-end attention models for speech recognition. In Proceeding of Interspeech, (pp. 7\u201311).","DOI":"10.21437\/Interspeech.2018-1616"},{"key":"10050_CR28","doi-asserted-by":"crossref","unstructured":"Zhang, S., Lei, M., Liu, Y., & Li, W. (2019). Investigation of modeling units for mandarin speech recognition using DFSMN-CTC-sMBR. In Proceeding of ICASSP, (pp. 7085\u20137089).","DOI":"10.1109\/ICASSP.2019.8683859"},{"key":"10050_CR29","doi-asserted-by":"crossref","unstructured":"Zhou, S., Dong, L., Xu, S., & Xu, B. (2018). A comparison of modeling units in sequence-to-sequence speech recognition with the transformer on Mandarin Chinese. In Neural information processing, (pp. 210\u2013220).","DOI":"10.1007\/978-3-030-04221-9_19"},{"key":"10050_CR30","doi-asserted-by":"crossref","unstructured":"Zou, W., Jiang, D., Zhao, S., Yang, G., & Li, X. (2018). Comparable study of modeling units for end-to-end Mandarin speech recognition. In Proceeding of ISCSLP, (pp. 369\u2013373).","DOI":"10.1109\/ISCSLP.2018.8706661"}],"container-title":["International Journal of Speech Technology"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10772-023-10050-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10772-023-10050-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10772-023-10050-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,11]],"date-time":"2024-01-11T10:12:45Z","timestamp":1704967965000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10772-023-10050-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,11,8]]},"references-count":30,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2023,12]]}},"alternative-id":["10050"],"URL":"https:\/\/doi.org\/10.1007\/s10772-023-10050-z","relation":{},"ISSN":["1381-2416","1572-8110"],"issn-type":[{"value":"1381-2416","type":"print"},{"value":"1572-8110","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,11,8]]},"assertion":[{"value":"27 February 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"12 September 2023","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 November 2023","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no competing interests.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}]}}