{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T20:05:16Z","timestamp":1776888316108,"version":"3.51.2"},"reference-count":39,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100002465","name":"Delta","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100002465","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434628","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-6","source":"Crossref","is-referenced-by-count":5,"title":["Audio-CoT: Exploring Chain-of-Thought Reasoning in Large Audio Language Model"],"prefix":"10.1109","author":[{"given":"Ziyang","family":"Ma","sequence":"first","affiliation":[{"name":"Shanghai Jiao Tong University,X-LANCE Lab MoE Key Lab of Artificial Intelligence"}]},{"given":"Zhuo","family":"Chen","sequence":"additional","affiliation":[{"name":"ByteDance Inc."}]},{"given":"Yuping","family":"Wang","sequence":"additional","affiliation":[{"name":"ByteDance Inc."}]},{"given":"Eng-Siong","family":"Chng","sequence":"additional","affiliation":[{"name":"Nanyang Technological University,College of Computing and Data Science"}]},{"given":"Xie","family":"Chen","sequence":"additional","affiliation":[{"name":"Shanghai Jiao Tong University,X-LANCE Lab MoE Key Lab of Artificial Intelligence"}]}],"member":"263","reference":[{"key":"ref1","article-title":"SALMONN: Towards generic hearing abilities for large language models","volume-title":"Proc. ICLR","author":"Tang"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389742"},{"key":"ref3","article-title":"Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models","author":"Chu","year":"2023","journal-title":"arXiv preprint arXiv:2311.07919"},{"key":"ref4","article-title":"Qwen2-audio technical report","volume-title":"arXiv preprint arXiv:2407.10759","author":"Chu","year":"2024"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.361"},{"key":"ref6","article-title":"Audio flamingo: A novel audio language model with few-shot learning and dialogue abilities","volume-title":"Proc. ICML","author":"Kong"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389732"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389705"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i23.34666"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447605"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10445874"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-488"},{"key":"ref13","article-title":"CTC-Assisted LLMBased Contextual ASR","volume-title":"Proc. SLT","author":"Yang"},{"key":"ref14","article-title":"Unveiling the potential of LLM-ased asr on Chinese opensource datasets","author":"Geng","year":"2024","journal-title":"arXiv preprint arXiv:2405.02132"},{"key":"ref15","article-title":"Seed-ASR: Understanding diverse speech and contexts with llm-based speech recognition","author":"Bai","year":"2024","journal-title":"arXiv preprint arXiv:2407.04675"},{"key":"ref16","article-title":"BEATs-based audio captioning model with INSTRUCTOR embedding supervision and ChatGPT mix-up","volume-title":"Proc. DCASE Challenge","author":"Wu"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10889071"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10890325"},{"key":"ref19","article-title":"Leveraging ced encoder and large language models for automated audio captioning","volume-title":"Proc. DCASE Challenge","author":"Liu"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448257"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.109"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.naacl-long.218"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.naacl-demo.19"},{"key":"ref24","article-title":"Chain-of-thought prompting elicits reasoning in large language models","volume-title":"Proc. Neurips","author":"Wei"},{"key":"ref25","article-title":"Sparks of large audio models: A survey and outlook","author":"Latif","year":"2023","journal-title":"arXiv preprint arXiv:2308.12792"},{"key":"ref26","article-title":"Towards audio language modeling-an overview","author":"Wu","year":"2024","journal-title":"arXiv preprint arXiv:2402.13236"},{"key":"ref27","article-title":"A survey on speech large language models","author":"Peng","year":"2024","journal-title":"arXiv preprint arXiv:2410.18908"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.65"},{"key":"ref29","article-title":"Large language models are zero-shot reasoners","volume-title":"Proc. Neurips","author":"Kojima"},{"key":"ref30","article-title":"Automatic chain of thought prompting in large language models","volume-title":"Proc. ICLR","author":"Zhang"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.acl-long.225"},{"key":"ref32","article-title":"Expertprompting: Instructing large language models to be distinguished experts","author":"Xu","year":"2023","journal-title":"arXiv preprint arXiv:2305.14688"},{"key":"ref33","article-title":"Multimodal chain-of-thought reasoning in language models","volume-title":"Proc. TMLR","author":"Zhang"},{"key":"ref34","article-title":"The role of chain-of-thought in complex vision-language reasoning task","author":"Wu","year":"2023","journal-title":"arXiv preprint arXiv:2311.09193"},{"key":"ref35","article-title":"M3 CoT: A novel benchmark for multi-domain multi-step multi-modal chain-ofthought","volume-title":"Proc. ACL","author":"Chen"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.680"},{"key":"ref37","article-title":"Omnibench: Towards the future of universal omni-language models","author":"Li","year":"2024","journal-title":"arXiv preprint arXiv:2409.15272"},{"key":"ref38","article-title":"MMAU: A massive multitask audio understanding and reasoning benchmark","volume-title":"Proc. ICLR","author":"Sakshi"},{"key":"ref39","article-title":"Self-consistency improves chain of thought reasoning in language models","volume-title":"Proc. ICLR","author":"Wang"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434628.pdf?arnumber=11434628","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:57:29Z","timestamp":1775192249000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434628\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":39,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434628","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}