{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:33:20Z","timestamp":1776886400884,"version":"3.51.2"},"reference-count":40,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434627","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-7","source":"Crossref","is-referenced-by-count":2,"title":["Low-Resource Domain Adaptation for Speech LLMs via Text-Only Fine-Tuning"],"prefix":"10.1109","author":[{"given":"Yangui","family":"Fang","sequence":"first","affiliation":[{"name":"Huazhong University of Science and Technology,School of Electronic Information and Communications"}]},{"given":"Jing","family":"Peng","sequence":"additional","affiliation":[{"name":"AI Institute, X-LANCE Lab, Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence,Shanghai,China"}]},{"given":"Xu","family":"Li","sequence":"additional","affiliation":[{"name":"AISpeech Co., Ltd,Suzhou,China"}]},{"given":"Yu","family":"Xi","sequence":"additional","affiliation":[{"name":"AI Institute, X-LANCE Lab, Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence,Shanghai,China"}]},{"given":"Chengwei","family":"Zhang","sequence":"additional","affiliation":[{"name":"Huazhong University of Science and Technology,School of Electronic Information and Communications"}]},{"given":"Guohui","family":"Zhong","sequence":"additional","affiliation":[{"name":"Huazhong University of Science and Technology,School of Electronic Information and Communications"}]},{"given":"Kai","family":"Yu","sequence":"additional","affiliation":[{"name":"AI Institute, X-LANCE Lab, Shanghai Jiao Tong University,MoE Key Lab of Artificial Intelligence,Shanghai,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1002\/j.1538-7305.1983.tb03114.x"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/29.46546"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2021.3112535"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2014.2339736"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2020-3015"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-9996"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2005.06.042"},{"key":"ref8","article-title":"Improving mandarin speech recognition with block-augmented transformer","author":"Ren","year":"2022","journal-title":"arXiv preprint arXiv:2207.11697"},{"key":"ref9","article-title":"A survey on speech large language models","author":"Peng","year":"2025"},{"key":"ref10","article-title":"On the landscape of spoken language models: A comprehensive survey","author":"Arora","year":"2025"},{"key":"ref11","article-title":"An embarrassingly simple approach for llm with strong asr capacity","author":"Ma","year":"2024","journal-title":"arXiv preprint arXiv:2402.08846"},{"key":"ref12","article-title":"Salmonn: Towards generic hearing abilities for large language models","author":"Tang","year":"2023","journal-title":"arXiv preprint arXiv:2310.13289"},{"key":"ref13","article-title":"Seed-asr: Understanding diverse speech and contexts with llm-based speech recognition","author":"Bai","year":"2024","journal-title":"arXiv preprint arXiv:2407.04675"},{"key":"ref14","article-title":"Fireredasr: Open-source industrial-grade mandarin speech recognition models from encoder-decoder to llm integration","author":"Xu","year":"2025","journal-title":"arXiv preprint arXiv:2501.14350"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICSDA.2017.8384449"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/icasspw65056.2025.11010998"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2023.3237025"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1002\/9781394214624.ch5"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-2904"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389722"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.459"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389617"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2019-1209"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747862"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.21437\/interspeech.2018-1392"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2015-177"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639034"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-767"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1171"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447240"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389722"},{"key":"ref33","article-title":"Mala-asr: Multimedia-assisted llm-based asr","author":"Yang","year":"2024","journal-title":"arXiv preprint arXiv:2406.05839"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10446898"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448079"},{"key":"ref36","article-title":"Medical speech, transcription, and intent","author":"Inc","year":"2019"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1965"},{"key":"ref38","first-page":"28492","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"Proceedings of the 40th International Conference on Machine Learning","author":"Radford"},{"key":"ref39","article-title":"Qwen2.5: A party of foundation models","author":"Team","year":"2024"},{"key":"ref40","article-title":"OSUM: Advancing open speech understanding models with limited resources in academia","author":"Geng","year":"2025","journal-title":"arXiv preprint arXiv:2501.13306"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434627.pdf?arnumber=11434627","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:57:27Z","timestamp":1775192247000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434627\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":40,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434627","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}