{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,26]],"date-time":"2025-03-26T00:22:07Z","timestamp":1742948527017,"version":"3.40.3"},"publisher-location":"Cham","reference-count":16,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031442001"},{"type":"electronic","value":"9783031442018"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-44201-8_7","type":"book-chapter","created":{"date-parts":[[2023,9,22]],"date-time":"2023-09-22T08:03:20Z","timestamp":1695369800000},"page":"78-90","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["Multi-task Pre-training for\u00a0Lhasa-Tibetan Speech Recognition"],"prefix":"10.1007","author":[{"given":"Yigang","family":"Liu","sequence":"first","affiliation":[]},{"given":"Yue","family":"Zhao","sequence":"additional","affiliation":[]},{"given":"Xiaona","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Liang","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Xubei","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,23]]},"reference":[{"key":"7_CR1","unstructured":"Hendrycks, D., Lee, K., Mazeika, M.: Using pre-training can improve model robustness and uncertainty. In: International Conference on Machine Learning, pp. 2712\u20132721. PMLR (2019)"},{"key":"7_CR2","unstructured":"Fan, Z., Zhou, S., Xu, B. Unsupervised pre-training for sequence to sequence speech recognition (2019)"},{"key":"7_CR3","doi-asserted-by":"publisher","first-page":"14","DOI":"10.3389\/fcomp.2020.00014","volume":"2","author":"M Lech","year":"2020","unstructured":"Lech, M., Stolar, M., Best, C., Bolia, R.: Real-Time speech emotion recognition using a pre-trained image classification network: effects of bandwidth reduction and companding. Front. Comput. Sci. 2, 14 (2020). https:\/\/doi.org\/10.3389\/fcomp.2020.00014","journal-title":"Front. Comput. Sci."},{"key":"7_CR4","doi-asserted-by":"crossref","unstructured":"Bansal, S., Kamper, H., Livescu, K., et al.: Pre-training on high-resource speech recognition improves low-resource speech-to-text translation. arXiv preprint arXiv:1809.01431 (2018)","DOI":"10.21437\/Interspeech.2018-1326"},{"key":"7_CR5","doi-asserted-by":"publisher","first-page":"133","DOI":"10.3390\/info12030133","volume":"12","author":"W Zhang","year":"2021","unstructured":"Zhang, W., Li, X., Yang, Y., Dong, R.: Pre-training on mixed data for low-resource neural machine translation. Information 12, 133 (2021)","journal-title":"Information"},{"key":"7_CR6","doi-asserted-by":"publisher","unstructured":"Pan, L.: Research on low resource multilingual speech recognition based on transfer learning. Tianjin University (2019). gtjdu.2019.004688. 
https:\/\/doi.org\/10.27356\/d.cnki","DOI":"10.27356\/d.cnki"},{"issue":"04","key":"7_CR7","doi-asserted-by":"publisher","first-page":"359","DOI":"10.16451\/j.cnki.issn1003-6059.201704008","volume":"30","author":"Q Wang","year":"2017","unstructured":"Wang, Q., Guo, W., Xie, C.: Tibetan speech recognition based on end-to-end technology. Pattern Recogn. Artif. Intell. 30(04), 359\u2013364 (2017). https:\/\/doi.org\/10.16451\/j.cnki.issn1003-6059.201704008","journal-title":"Pattern Recogn. Artif. Intell."},{"key":"7_CR8","doi-asserted-by":"crossref","unstructured":"Yan, J., Lv, Z., Huang, S., et al.: Low-resource tibetan dialect acoustic modeling based on transfer learning. In: SLTU, pp. 6\u201310 (2018)","DOI":"10.21437\/SLTU.2018-2"},{"key":"7_CR9","doi-asserted-by":"publisher","first-page":"2","DOI":"10.1186\/s13636-021-00233-4","volume":"2022","author":"S Qin","year":"2022","unstructured":"Qin, S., Wang, L., Li, S., et al.: Improving low-resource Tibetan end-to-end ASR by multilingual and multilevel unit modeling. J. Audio Speech Music Proc. 2022, 2 (2022)","journal-title":"J. Audio Speech Music Proc."},{"issue":"1","key":"7_CR10","doi-asserted-by":"publisher","first-page":"629","DOI":"10.32604\/cmc.2022.027092","volume":"73","author":"Z Wang","year":"2022","unstructured":"Wang, Z., Zhao, Y., Wu, L., et al.: Cross-language transfer learning-based Lhasa-Tibetan speech recognition. CMC-Comput. Mater. Continua 73(1), 629\u2013639 (2022)","journal-title":"CMC-Comput. Mater. Continua"},{"key":"7_CR11","doi-asserted-by":"crossref","unstructured":"Sun, Y., Wang, S., Li, Y., et al.: Ernie 2.0: a continual pre-training framework for language understanding. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, no. 05, pp. 8968\u20138975 (2020)","DOI":"10.1609\/aaai.v34i05.6428"},{"key":"7_CR12","unstructured":"Lu, Y., Li, Z., He, D., et al.: Understanding and improving transformer from a multi-particle dynamic system point of view. arXiv preprint arXiv:1906.02762 (2019)"},{"key":"7_CR13","doi-asserted-by":"crossref","unstructured":"Gulati, A., Qin, J., Chiu, C.C., et al.: Conformer: convolution-augmented transformer for speech recognition (2020)","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"7_CR14","doi-asserted-by":"crossref","unstructured":"Bu, H., Du, J., Na, X., et al.: Aishell-1: an open-source mandarin speech corpus and a speech recognition baseline. In: 2017 20th Conference of the Oriental Chapter of the International Coordinating Committee on Speech Databases and Speech I\/O Systems and Assessment (O-COCOSDA), pp. 1\u20135. IEEE (2017)","DOI":"10.1109\/ICSDA.2017.8384449"},{"key":"7_CR15","doi-asserted-by":"crossref","unstructured":"Panayotov, V., Chen, G., Povey, D., et al.: Librispeech: an ASR corpus based on public domain audio books. In: 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 5206\u20135210. IEEE (2015)","DOI":"10.1109\/ICASSP.2015.7178964"},{"issue":"2\u20133","key":"7_CR16","first-page":"297","volume":"22","author":"Y Zhao","year":"2020","unstructured":"Zhao, Y., Xu, X., Yue, J., et al.: An open speech resource for Tibetan multi-dialect and multitask recognition. Int. J. Comput. Sci. Eng. 22(2\u20133), 297\u2013304 (2020)","journal-title":"Int. J. Comput. Sci. 
Eng."}],"container-title":["Lecture Notes in Computer Science","Artificial Neural Networks and Machine Learning \u2013 ICANN 2023"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-44201-8_7","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,9,22]],"date-time":"2023-09-22T08:04:13Z","timestamp":1695369853000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-44201-8_7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031442001","9783031442018"],"references-count":16,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-44201-8_7","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"23 September 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ICANN","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Artificial Neural Networks","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Heraklion","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Greece","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"26 September 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"32","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"icann2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/e-nns.org\/icann2023\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Single-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"easyacademia.org","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"947","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"426","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information 
(provided by the conference organizers)"}},{"value":"22","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"45% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"2.4","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"4","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"No","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"type of other papers accepted  : 9 Abstract","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}
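
The record above is a standard Crossref "work" response for the chapter. For readers who want to retrieve or post-process such metadata themselves, the following is a minimal sketch; it assumes the public Crossref REST API at api.crossref.org and uses only field names that appear in the record above (the variable names are illustrative, not part of any official client).

# Illustrative sketch only: fetch a Crossref work record and read a few fields.
# Assumes the public Crossref REST API endpoint https://api.crossref.org/works/{DOI}.
import json
import urllib.request

DOI = "10.1007/978-3-031-44201-8_7"

with urllib.request.urlopen(f"https://api.crossref.org/works/{DOI}") as resp:
    envelope = json.load(resp)      # {"status": "ok", "message-type": "work", "message": {...}}

work = envelope["message"]          # the chapter metadata itself
print(work["title"][0])             # Multi-task Pre-training for Lhasa-Tibetan Speech Recognition
print(work["container-title"])      # LNCS / ICANN 2023 proceedings titles
print(work["DOI"], work["page"])    # 10.1007/978-3-031-44201-8_7 78-90
print(", ".join(f'{a["given"]} {a["family"]}' for a in work["author"]))
print(len(work.get("reference", [])), "cited references")   # matches reference-count: 16

The sketch uses only the Python standard library, so it runs without extra dependencies; for anything beyond occasional queries, Crossref asks clients to identify themselves (for example via a mailto query parameter or User-Agent header), which is omitted here for brevity.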