{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T06:15:55Z","timestamp":1774419355253,"version":"3.50.1"},"reference-count":36,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,4,6]],"date-time":"2025-04-06T00:00:00Z","timestamp":1743897600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,4,6]]},"DOI":"10.1109\/icassp49660.2025.10889092","type":"proceedings-article","created":{"date-parts":[[2025,3,12]],"date-time":"2025-03-12T13:52:43Z","timestamp":1741787563000},"page":"1-5","source":"Crossref","is-referenced-by-count":0,"title":["What Are They Doing? Joint Audio-Speech Co-Reasoning"],"prefix":"10.1109","author":[{"given":"Yingzhi","family":"Wang","sequence":"first","affiliation":[{"name":"Elm,Research Center,Riyadh,KSA"}]},{"given":"Pooneh","family":"Mousavi","sequence":"additional","affiliation":[{"name":"Concordia University Mila,Montreal,Canada"}]},{"given":"Artem","family":"Ploujnikov","sequence":"additional","affiliation":[{"name":"Universit&#x00E9; de Montr&#x00E9;al Mila,Montreal,Canada"}]},{"given":"Mirco","family":"Ravanelli","sequence":"additional","affiliation":[{"name":"Concordia University Mila,Montreal,Canada"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2021.3090678"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2011.2112333"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/78.553476"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/29.1488"},{"key":"ref5","volume-title":"Automatic speech recognition.","volume":"1","author":"Yu","year":"2016"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1002\/9781119992691"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/5.628714"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2011.2125954"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2010.09.020"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389790"},{"key":"ref11","article-title":"On the opportunities and risks of foundation models","author":"Bommasani","year":"2021"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389742"},{"key":"ref13","article-title":"Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models","author":"Chu","year":"2023"},{"key":"ref14","article-title":"Salmonn: Towards generic hearing abilities for large language models","author":"Tang","year":"2023"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.263"},{"key":"ref16","article-title":"Speechverse: A large-scale generalizable audio language model","author":"Das","year":"2024"},{"key":"ref17","first-page":"28492","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"International conference on machine learning","author":"Radford"},{"key":"ref18","article-title":"Beats: Audio pre-training with acoustic tokenizers","author":"Chen","year":"2022"},{"key":"ref19","first-page":"19730","article-title":"Blip-2: Bootstrapping 
language-image pre-training with frozen image encoders and large language models","volume-title":"International conference on machine learning","author":"Li"},{"issue":"3","key":"ref20","first-page":"6","article-title":"Vicuna: An open-source chatbot impressing gpt-4 with 90%* chatgpt quality","volume":"2","author":"Chiang","year":"2023"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/JSTSP.2022.3188113"},{"key":"ref22","article-title":"Llama 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448257"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.109"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.naacl-long.218"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952261"},{"key":"ref27","article-title":"Fma: A dataset for music analysis","author":"Defferrard","year":"2016"},{"key":"ref28","article-title":"Language models are multilingual chain-of-thought reasoners","author":"Shi","year":"2022"},{"key":"ref29","article-title":"Lora: Low-rank adaptation of large language models","author":"Hu","year":"2021"},{"key":"ref30","article-title":"Judging llm-as-a-judge with mt-bench and chatbot arena","volume":"36","author":"Zheng","year":"2024","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref31","article-title":"Vibe-eval: A hard evaluation suite for measuring progress of multimodal language models","author":"Padlewski","year":"2024"},{"key":"ref32","article-title":"Gpt-4 technical report","author":"Achiam","year":"2023"},{"key":"ref33","first-page":"486","article-title":"Freesound datasets: a platform for the creation of open audio datasets","volume-title":"Proceedings of the 18th ISMIR Conference; 2017 oct 23-27; Suzhou, China.[Canada]: International Society for Music Information Retrieval; 2017","author":"Fonseca"},{"key":"ref34","article-title":"Qwen2-audio technical report","author":"Chu","year":"2024"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1145\/3689217.3690621"},{"key":"ref36","first-page":"24824","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume":"35","author":"Wei","year":"2022","journal-title":"Advances in neural information processing systems"}],"event":{"name":"ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Hyderabad, India","start":{"date-parts":[[2025,4,6]]},"end":{"date-parts":[[2025,4,11]]}},"container-title":["ICASSP 2025 - 2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/10887540\/10887541\/10889092.pdf?arnumber=10889092","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T05:22:27Z","timestamp":1774416147000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10889092\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,4,6]]},"references-count":36,"URL":"https:\/\/doi.org\/10.1109\/icassp49660.2025.10889092","relation":{},"subject":[],"published":{"date-parts":[[2025,4,6]]}}}
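The record above follows the envelope returned by the public Crossref REST API works endpoint (https://api.crossref.org/works/{DOI}), which wraps the bibliographic fields in a "message" object. As a minimal sketch of how such a record can be retrieved and reduced to the citation shown, assuming network access and the third-party requests package:

import requests

# DOI taken from the record above.
DOI = "10.1109/icassp49660.2025.10889092"

# Fetch the work record; Crossref wraps it in {"status", "message-type", "message", ...}.
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=10)
resp.raise_for_status()
work = resp.json()["message"]

# "title" and "container-title" are lists; "author" entries carry given/family names.
title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work["author"])
venue = work["container-title"][0]

print(f'{authors}, "{title}," in {venue}, pp. {work["page"]}.')
print(f'DOI: https://doi.org/{work["DOI"]}')

The field names used here (title, author, container-title, page, DOI) match those present in the record itself; error handling and Crossref's suggested polite User-Agent header are omitted for brevity.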