{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,6,26]],"date-time":"2025-06-26T06:02:19Z","timestamp":1750917739082,"version":"3.28.0"},"reference-count":16,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,11,29]],"date-time":"2022-11-29T00:00:00Z","timestamp":1669680000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,11,29]],"date-time":"2022-11-29T00:00:00Z","timestamp":1669680000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,11,29]]},"DOI":"10.1109\/snams58071.2022.10062774","type":"proceedings-article","created":{"date-parts":[[2023,3,15]],"date-time":"2023-03-15T17:30:26Z","timestamp":1678901426000},"page":"1-5","source":"Crossref","is-referenced-by-count":6,"title":["Multilingual Transformer Language Model for Speech Recognition in Low-resource Languages"],"prefix":"10.1109","author":[{"given":"Li","family":"Miao","sequence":"first","affiliation":[{"name":"Microsoft Corporation,Mountain View,United States"}]},{"given":"Jian","family":"Wu","sequence":"additional","affiliation":[{"name":"Microsoft Corporation,Seattle,United States"}]},{"given":"Piyush","family":"Behre","sequence":"additional","affiliation":[{"name":"Microsoft Corporation,Mountain View,United States"}]},{"given":"Shuangyu","family":"Chang","sequence":"additional","affiliation":[{"name":"Microsoft Corporation,Mountain View,United States"}]},{"given":"Sarangarajan","family":"Parthasarathy","sequence":"additional","affiliation":[{"name":"Microsoft Corporation,Mountain View,United States"}]}],"member":"263","reference":[{"volume-title":"Automatic Speech Recognition-A Deep Learning Approach","year":"2014","author":"Yu","key":"ref1"},{"article-title":"Multi-channel speech recognition: Lstms all the way through","volume-title":"CHiME 2016-4th International Workshop on Speech Processing in Everyday Environments","author":"Erdogan","key":"ref2"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2014.6854535"},{"key":"ref4","article-title":"Language modeling with deep transformers","volume":"abs\/1905.04226","author":"Irie","year":"2019","journal-title":"CoRR"},{"key":"ref5","article-title":"Cross-lingual language model pretraining","volume":"abs\/1901.07291","author":"Lample","year":"2019","journal-title":"CoRR"},{"key":"ref6","article-title":"Unsupervised cross-lingual representation learning at scale","volume":"abs\/1911.02116","author":"Conneau","year":"2019","journal-title":"CoRR"},{"key":"ref7","article-title":"How multilingual is multilingual bert?","volume":"abs\/1906.01502","author":"Pires","year":"2019","journal-title":"CoRR"},{"volume-title":"Improving language understanding by generative pre-training","year":"2018","author":"Radford","key":"ref8"},{"key":"ref9","article-title":"BERT: pre-training of deep bidirectional transformers for language understanding","volume":"abs\/1810.04805","author":"Devlin","year":"2018","journal-title":"CoRR"},{"key":"ref10","article-title":"Fine-tuned language models for text classification","volume":"abs\/1801.06146","author":"Howard","year":"2018","journal-title":"CoRR"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1252"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-2831"},{"key":"ref13","article-title":"Unsupervised cross-lingual representation learning for speech recognition","volume":"abs\/2006.13979","author":"Conneau","year":"2020","journal-title":"CoRR"},{"key":"ref14","article-title":"Neural machine translation of rare words with subword units","volume":"abs\/1508.07909","author":"Sennrich","year":"2015","journal-title":"CoRR"},{"key":"ref15","article-title":"Multilingual translation with extensible multilingual pretraining and finetuning","volume":"abs\/2008.00401","author":"Tang","year":"2020","journal-title":"CoRR"},{"key":"ref16","article-title":"Training tips for the transformer model","volume":"abs\/1804.00247","author":"Popel","year":"2018","journal-title":"CoRR"}],"event":{"name":"2022 Ninth International Conference on Social Networks Analysis, Management and Security (SNAMS)","start":{"date-parts":[[2022,11,29]]},"location":"Milan, Italy","end":{"date-parts":[[2022,12,1]]}},"container-title":["2022 Ninth International Conference on Social Networks Analysis, Management and Security (SNAMS)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10062356\/10062484\/10062774.pdf?arnumber=10062774","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,2,13]],"date-time":"2024-02-13T12:22:15Z","timestamp":1707826935000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10062774\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,11,29]]},"references-count":16,"URL":"https:\/\/doi.org\/10.1109\/snams58071.2022.10062774","relation":{},"subject":[],"published":{"date-parts":[[2022,11,29]]}}}