{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T20:34:02Z","timestamp":1776890042944,"version":"3.51.2"},"reference-count":31,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434780","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-7","source":"Crossref","is-referenced-by-count":5,"title":["Omni-R1: Do You Really Need Audio to Fine-Tune Your Audio LLM?"],"prefix":"10.1109","author":[{"given":"Andrew","family":"Rouditchenko","sequence":"first","affiliation":[{"name":"MIT CSAIL"}]},{"given":"Saurabhchand","family":"Bhati","sequence":"additional","affiliation":[{"name":"MIT CSAIL"}]},{"given":"Edson","family":"Araujo","sequence":"additional","affiliation":[{"name":"Goethe University of Frankfurt"}]},{"given":"Samuel","family":"Thomas","sequence":"additional","affiliation":[{"name":"IBM Research AI"}]},{"given":"Hilde","family":"Kuehne","sequence":"additional","affiliation":[{"name":"Goethe University of Frankfurt"}]},{"given":"Rogerio","family":"Feris","sequence":"additional","affiliation":[{"name":"IBM Research AI"}]},{"given":"James","family":"Glass","sequence":"additional","affiliation":[{"name":"MIT CSAIL"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning","author":"Guo","year":"2025","journal-title":"arXiv preprint arXiv:2501.12948"},{"key":"ref2","article-title":"Qwen2. 5-omni technical report","volume-title":"arXiv preprint arXiv:2503.20215","author":"Xu","year":"2025"},{"key":"ref3","article-title":"Deepseekmath: Pushing the limits of mathematical reasoning in open language models","author":"Shao","year":"2024","journal-title":"arXiv preprint arXiv:2402.03300"},{"key":"ref4","article-title":"MMAU: A massive multitask audio understanding and reasoning benchmark","volume-title":"The Thirteenth International Conference on Learning Representations","author":"Sakshi"},{"key":"ref5","article-title":"Mmar: A challenging benchmark for deep reasoning in speech, audio, music, and their mix","author":"Ma","year":"2025","journal-title":"arXiv preprint arXiv:2505.13032"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548291"},{"key":"ref7","article-title":"Reinforcement learning outperforms supervised fine-tuning: A case study on audio question answering","author":"Li","year":"2025","journal-title":"arXiv preprint arXiv:2503.11197"},{"key":"ref8","article-title":"Qwen2-audio technical report","volume-title":"arXiv preprint arXiv:2407.10759","author":"Chu","year":"2024"},{"key":"ref9","article-title":"Sari: Structured audio reasoning via curriculum-guided reinforcement learning","author":"Wen","year":"2025","journal-title":"arXiv preprint arXiv:2504.15900"},{"key":"ref10","article-title":"Are you really listening? boosting perceptual awareness in music-qa benchmarks","author":"Zang","year":"2025","journal-title":"arXiv preprint arXiv:2504.00369"},{"key":"ref11","article-title":"Audio-reasoner: Improving reasoning capability in large audio language models","author":"Xie","year":"2025","journal-title":"arXiv preprint arXiv:2503.02318"},{"key":"ref12","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv preprint arXiv:1707.06347"},{"key":"ref13","article-title":"Audio flamingo 2: An audio-language model with long-audio understanding and expert reasoning abilities","author":"Ghosh","year":"2025","journal-title":"arXiv preprint arXiv:2503.03983"},{"key":"ref14","article-title":"Phi-4-mini technical report: Compact yet powerful multimodal language models via mixture-of-loras","volume-title":"arXiv preprint arXiv:2503.01743","author":"Abouelenin","year":"2025"},{"key":"ref15","article-title":"A preliminary exploration with gpt-4o voice mode","author":"Lin","year":"2025","journal-title":"arXiv preprint arXiv:2502.09940"},{"key":"ref16","article-title":"Audio-cot: Exploring chain-of-thought reasoning in large audio language model","author":"Ma","year":"2025","journal-title":"arXiv preprint arXiv:2501.07246"},{"key":"ref17","article-title":"Kimi-audio technical report","volume-title":"arXiv preprint arXiv:2504.18425","author":"Ding","year":"2025"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/taslpro.2025.3583354"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9053174"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.52202\/075280-0795"},{"key":"ref21","article-title":"Listen, think, and understand","volume-title":"International Conference on Learning Representations","author":"Gong"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389742"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-naacl.231"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447027"},{"key":"ref25","article-title":"M2 ugen: Multi-modal music understanding and generation with the power of large language models","year":"2023","journal-title":"arXiv preprint arXiv:2311.11255"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.361"},{"key":"ref27","article-title":"Salmonn: Towards generic hearing abilities for large language models","volume-title":"International Conference on Learning Representations","author":"Tang"},{"key":"ref28","article-title":"Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models","author":"Chu","year":"2023","journal-title":"arXiv preprint arXiv:2311.07919"},{"key":"ref29","article-title":"Gemini 2.0 flash","year":"2025"},{"key":"ref30","article-title":"Gpt-4o system card","author":"Hurst","year":"2024","journal-title":"arXiv preprint arXiv:2410.21276"},{"key":"ref31","article-title":"Think you have solved question answering? try arc, the ai2 reasoning challenge","author":"Clark","year":"2018","journal-title":"arXiv preprint arXiv:1803.05457"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434780.pdf?arnumber=11434780","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:59:44Z","timestamp":1775192384000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434780\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":31,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434780","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}