{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:28:38Z","timestamp":1776886118958,"version":"3.51.2"},"reference-count":50,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434596","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-8","source":"Crossref","is-referenced-by-count":1,"title":["Competitive Audio-Language Models with Data-Efficient Single-Stage Training on Public Data"],"prefix":"10.1109","author":[{"given":"Gokul Karthik","family":"Kumar","sequence":"first","affiliation":[{"name":"Technology Innovation Institute,Abu Dhabi,UAE"}]},{"given":"Rishabh","family":"Saraf","sequence":"additional","affiliation":[{"name":"Technology Innovation Institute,Abu Dhabi,UAE"}]},{"given":"Ludovick","family":"Lepauloux","sequence":"additional","affiliation":[{"name":"Technology Innovation Institute,Abu Dhabi,UAE"}]},{"given":"Abdul","family":"Muneer","sequence":"additional","affiliation":[{"name":"Technology Innovation Institute,Abu Dhabi,UAE"}]},{"given":"Billel","family":"Mokeddem","sequence":"additional","affiliation":[{"name":"Technology Innovation Institute,Abu Dhabi,UAE"}]},{"given":"Hakim","family":"Hacid","sequence":"additional","affiliation":[{"name":"Technology Innovation Institute,Abu Dhabi,UAE"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Gpt-4 technical report","volume-title":"arXiv preprint arXiv:2303.08774","author":"Achiam","year":"2023"},{"key":"ref2","article-title":"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context","author":"Team","year":"2024","journal-title":"arXiv preprint arXiv:2403.05530"},{"key":"ref3","article-title":"The llama 3 herd of models","author":"Grattafiori","year":"2024"},{"key":"ref4","article-title":"The falcon 3 family of open models","author":"Team","year":"2024"},{"key":"ref5","article-title":"Qwen2 technical report","volume-title":"arXiv preprint arXiv:2407.10671","author":"Yang","year":"2024"},{"key":"ref6","article-title":"Phi-3 technical report: A highly capable language model locally on your phone","volume-title":"arXiv preprint arXiv:2404.14219","author":"Abdin","year":"2024"},{"key":"ref7","article-title":"Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality","author":"Chiang","year":"2023"},{"key":"ref8","article-title":"Openchat: Advancing open-source language models with mixed-quality data","author":"Wang","year":"2023","journal-title":"arXiv preprint arXiv:2309.11235"},{"key":"ref9","article-title":"Dora: Weight-decomposed low-rank adaptation","volume-title":"Forty-first International Conference on Machine 
Learning","author":"Liu"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.52202\/068431-1723"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72643-9_22"},{"key":"ref13","article-title":"Qwen-vl: A frontier large vision-language model with versatile abilities","author":"Bai","year":"2023","journal-title":"arXiv preprint arXiv:2308.12966"},{"key":"ref14","article-title":"Obelics: An open web-scale filtered dataset of interleaved image-text documents","author":"Lauren\u00e7on","year":"2023"},{"key":"ref15","article-title":"Learning transferable visual models from natural language supervision","author":"Radford","year":"2021"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01100"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747631"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10095889"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447027"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-naacl.231"},{"key":"ref21","article-title":"Listen, think, and understand","author":"Gong","year":"2024"},{"key":"ref22","article-title":"An embarrassingly simple approach for 11 m with strong asr capacity","author":"Ma","year":"2024","journal-title":"arXiv preprint arXiv:2402.08846"},{"key":"ref23","article-title":"Wavtokenizer: an efficient acoustic discrete codec tokenizer for audio language modeling","author":"Ji","year":"2024","journal-title":"arXiv preprint arXiv:2408.16532"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.52202\/075280-0795"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389742"},{"key":"ref26","article-title":"Salmonn: Towards generic hearing abilities for large language models","author":"Tang","year":"2024"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.361"},{"key":"ref28","article-title":"Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models","author":"Chu","year":"2023"},{"key":"ref29","article-title":"Qwen2-audio technical report","author":"Chu","year":"2024"},{"key":"ref30","article-title":"Beats: Audio pre-training with acoustic tokenizers","author":"Chen","year":"2022"},{"key":"ref31","article-title":"Reinforcement learning outperforms supervised finetuning: A case study on audio question answering","author":"Li","year":"2025","journal-title":"arXiv preprint arXiv:2503.11197"},{"key":"ref32","article-title":"Audio flamingo 2: An audio-language model with long-audio understanding and expert reasoning abilities","author":"Ghosh","year":"2025","journal-title":"arXiv preprint arXiv:2503.03983"},{"key":"ref33","article-title":"Phi-4-mini technical report: Compact yet powerful multimodal language models via mixture-ofloras","volume-title":"arXiv preprint arXiv:2503.01743","author":"Abouelenin","year":"2025"},{"key":"ref34","article-title":"Qwen2. 
      {"key": "ref35", "doi-asserted-by": "publisher", "DOI": "10.18653/v1/2024.acl-long.109"},
      {"key": "ref36", "article-title": "Mmau: A massive multi-task audio understanding and reasoning benchmark", "author": "Sakshi", "year": "2024", "journal-title": "arXiv preprint arXiv:2410.19168"},
      {"key": "ref37", "doi-asserted-by": "publisher", "DOI": "10.21437/Interspeech.2021-698"},
      {"key": "ref38", "doi-asserted-by": "publisher", "DOI": "10.1109/CVPR52729.2023.01457"},
      {"key": "ref39", "first-page": "28492", "article-title": "Robust speech recognition via large-scale weak supervision", "volume-title": "International conference on machine learning. PMLR", "author": "Radford"},
      {"key": "ref40", "doi-asserted-by": "publisher", "DOI": "10.21437/Interspeech.2023-2193"},
      {"key": "ref41", "article-title": "Shuka v1 - ai-powered conversations", "author": "AI", "year": "2024"},
      {"key": "ref42", "doi-asserted-by": "publisher", "DOI": "10.1109/CVPR52733.2024.02484"},
      {"key": "ref43", "doi-asserted-by": "publisher", "DOI": "10.1109/ICASSP.2017.7952261"},
      {"key": "ref44", "doi-asserted-by": "publisher", "DOI": "10.18653/v1/N19-1011"},
      {"key": "ref45", "doi-asserted-by": "publisher", "DOI": "10.1109/TASLP.2021.3133208"},
      {"key": "ref46", "first-page": "486", "article-title": "Freesound datasets: a platform for the creation of open audio datasets", "volume-title": "Proceedings of the 18th ISMIR Conference; 2017 Oct 23-27; Suzhou, China. [Canada]: International Society for Music Information Retrieval; 2017", "author": "Fonseca"},
      {"key": "ref47", "doi-asserted-by": "publisher", "DOI": "10.21437/Interspeech.2019-2441"},
      {"key": "ref48", "doi-asserted-by": "publisher", "DOI": "10.1109/ICASSP40776.2020.9053174"},
      {"key": "ref49", "doi-asserted-by": "publisher", "DOI": "10.21437/Interspeech.2017-950"},
      {"key": "ref50", "article-title": "Instruction speech", "year": "2024", "journal-title": "JanAI"}
    ],
    "event": {"name": "2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)", "location": "Honolulu, HI, USA", "start": {"date-parts": [[2025, 12, 6]]}, "end": {"date-parts": [[2025, 12, 10]]}},
    "container-title": ["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],
    "original-title": [],
    "link": [{"URL": "http://xplorestaging.ieee.org/ielx8/11434577/11433836/11434596.pdf?arnumber=11434596", "content-type": "unspecified", "content-version": "vor", "intended-application": "similarity-checking"}],
    "deposited": {"date-parts": [[2026, 4, 3]], "date-time": "2026-04-03T04:56:50Z", "timestamp": 1775192210000},
    "score": 1,
    "resource": {"primary": {"URL": "https://ieeexplore.ieee.org/document/11434596/"}},
    "subtitle": [],
    "short-title": [],
    "issued": {"date-parts": [[2025, 12, 6]]},
    "references-count": 50,
    "URL": "https://doi.org/10.1109/asru65441.2025.11434596",
    "relation": {},
    "subject": [],
    "published": {"date-parts": [[2025, 12, 6]]}
  }
}
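The record above is the standard envelope returned by the public Crossref REST API (`https://api.crossref.org/works/<DOI>`): a status wrapper around a "message" object whose fields ("title", "author", "reference", "issued", ...) carry the bibliographic data. Below is a minimal sketch of fetching such a record and turning it into a one-line citation, using only the Python standard library; it assumes network access, and the helper names `fetch_work` and `format_citation` are illustrative, not part of any library.

```python
import json
import urllib.parse
import urllib.request

DOI = "10.1109/asru65441.2025.11434596"

def fetch_work(doi: str) -> dict:
    """Fetch a Crossref work record and return its 'message' payload."""
    url = "https://api.crossref.org/works/" + urllib.parse.quote(doi)
    with urllib.request.urlopen(url) as resp:
        envelope = json.load(resp)
    # The envelope mirrors the record shown above: status, message-type, message.
    if envelope.get("message-type") != "work":
        raise ValueError("expected a 'work' record")
    return envelope["message"]

def format_citation(work: dict) -> str:
    """Build a one-line citation from title, authors, venue, year, and DOI."""
    authors = ", ".join(
        f"{a.get('given', '')} {a.get('family', '')}".strip()
        for a in work.get("author", [])
    )
    title = (work.get("title") or ["(untitled)"])[0]
    venue = (work.get("container-title") or ["(no venue)"])[0]
    year = work["issued"]["date-parts"][0][0]
    return f'{authors}. "{title}." {venue}, {year}. doi:{work["DOI"]}'

if __name__ == "__main__":
    work = fetch_work(DOI)
    print(format_citation(work))
    # References that carry only a DOI (e.g. ref10-ref12 above) can be
    # resolved through the same /works endpoint.
    with_doi = [r for r in work.get("reference", []) if "DOI" in r]
    print(f'{work["reference-count"]} references, {len(with_doi)} with DOIs')
```

Note the defensive `(... or [...])[0]` pattern: fields like "container-title" are lists in this schema and may be empty for some record types, as "subtitle" and "short-title" are here.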