{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,7,4]],"date-time":"2025-07-04T05:56:10Z","timestamp":1751608570351,"version":"3.28.0"},"reference-count":18,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T00:00:00Z","timestamp":1673222400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T00:00:00Z","timestamp":1673222400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,1,9]]},"DOI":"10.1109\/slt54892.2023.10023233","type":"proceedings-article","created":{"date-parts":[[2023,1,27]],"date-time":"2023-01-27T13:54:03Z","timestamp":1674827643000},"page":"295-301","source":"Crossref","is-referenced-by-count":3,"title":["Domain Adaptation of Low-Resource Target-Domain Models Using Well-Trained ASR Conformer Models"],"prefix":"10.1109","author":[{"given":"Vrunda N.","family":"Sukhadia","sequence":"first","affiliation":[{"name":"IIT Madras,Speech Lab,Dept. of Electrical Engineering,Chennai,India"}]},{"given":"S.","family":"Umesh","sequence":"additional","affiliation":[{"name":"IIT Madras,Speech Lab,Dept. of Electrical Engineering,Chennai,India"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1860"},{"key":"ref15","article-title":"The 2020 espnet update: new features, broadened applications, performance improvements, and future plans","volume":"abs 2012 13006","author":"watanabe","year":"2020","journal-title":"CoRR"},{"journal-title":"Gigaspeech An evolving multi-domain asr corpus with 10 000 hours of transcribed audio","year":"2021","author":"chen","key":"ref14"},{"key":"ref11","article-title":"Specaugment: A simple data augmentation method for automatic speech recog-nition","author":"park","year":"2019","journal-title":"ArXiv Preprint"},{"key":"ref10","article-title":"How transferable are features in deep neural networks?","author":"yosinski","year":"2014","journal-title":"ArXiv Preprint"},{"key":"ref2","first-page":"243","article-title":"Inter-nallanguage model estimation for domain-adaptive end-to-end speech recognition","author":"meng","year":"0","journal-title":"2021 IEEE Spoken Language Technology Workshop (SLT)"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TKDE.2009.191"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688137"},{"key":"ref16","article-title":"Con-former: Convolution-augmented transformer for speech recognition","author":"gulati","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1775"},{"key":"ref8","article-title":"Unsupervised cross-lingual representation learning at scale","author":"conneau","year":"2019","journal-title":"ar Xiv preprint"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-236"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU.2017.8268947"},{"key":"ref4","article-title":"Multitask training with text data for end-to-end speech recognition","author":"wang","year":"2020","journal-title":"ArXiv Preprint"},{"key":"ref3","article-title":"Librispeech transducer model with inter-nal language model prior correction","author":"zeyer","year":"2021","journal-title":"ar Xiv preprint"},{"key":"ref6","article-title":"Hu-bert: Self-supervised speech representation learning by masked prediction of hidden units","author":"hsu","year":"2021","journal-title":"ArXiv Preprint"},{"key":"ref5","article-title":"wav2vec 2.0: A framework for self-supervised learning of speech representations","author":"baevski","year":"2020","journal-title":"ArXiv Preprint"}],"event":{"name":"2022 IEEE Spoken Language Technology Workshop (SLT)","start":{"date-parts":[[2023,1,9]]},"location":"Doha, Qatar","end":{"date-parts":[[2023,1,12]]}},"container-title":["2022 IEEE Spoken Language Technology Workshop (SLT)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10022052\/10022330\/10023233.pdf?arnumber=10023233","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,2,20]],"date-time":"2023-02-20T17:08:48Z","timestamp":1676912928000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10023233\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,1,9]]},"references-count":18,"URL":"https:\/\/doi.org\/10.1109\/slt54892.2023.10023233","relation":{},"subject":[],"published":{"date-parts":[[2023,1,9]]}}}