{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,25]],"date-time":"2026-04-25T14:57:16Z","timestamp":1777129036161,"version":"3.51.4"},"reference-count":37,"publisher":"IEEE","license":[{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,14]],"date-time":"2024-04-14T00:00:00Z","timestamp":1713052800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024,4,14]]},"DOI":"10.1109\/icassp48485.2024.10445874","type":"proceedings-article","created":{"date-parts":[[2024,3,18]],"date-time":"2024-03-18T18:56:31Z","timestamp":1710788191000},"page":"12637-12641","source":"Crossref","is-referenced-by-count":41,"title":["Connecting Speech Encoder and Large Language Model for ASR"],"prefix":"10.1109","author":[{"given":"Wenyi","family":"Yu","sequence":"first","affiliation":[{"name":"Tsinghua University,Department of Electronic Engineering"}]},{"given":"Changli","family":"Tang","sequence":"additional","affiliation":[{"name":"Tsinghua University,Department of Electronic Engineering"}]},{"given":"Guangzhi","family":"Sun","sequence":"additional","affiliation":[{"name":"Tsinghua University,Department of Electronic Engineering"}]},{"given":"Xianzhao","family":"Chen","sequence":"additional","affiliation":[{"name":"ByteDance"}]},{"given":"Tian","family":"Tan","sequence":"additional","affiliation":[{"name":"ByteDance"}]},{"given":"Wei","family":"Li","sequence":"additional","affiliation":[{"name":"ByteDance"}]},{"given":"Lu","family":"Lu","sequence":"additional","affiliation":[{"name":"ByteDance"}]},{"given":"Zejun","family":"Ma","sequence":"additional","affiliation":[{"name":"ByteDance"}]},{"given":"Chao","family":"Zhang","sequence":"additional","affiliation":[{"name":"Tsinghua University,Department of Electronic Engineering"}]}],"member":"263","reference":[{"key":"ref1","volume-title":"GPT-4 technical report","year":"2023"},{"key":"ref2","article-title":"Language models are few-shot learners","volume-title":"Proc. NeurIPS","author":"Brown"},{"key":"ref3","article-title":"PaLM 2 technical report","author":"Anil","year":"2023"},{"key":"ref4","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref5","article-title":"Vicuna: An open-source chatbot impressing GPT-4 with 90%* ChatGPT quality","author":"Chiang","year":"2023"},{"key":"ref6","article-title":"Pangu-\u03b1: Large-scale autoregressive pretrained Chinese language models with auto-parallel computation","author":"Zeng","year":"2021"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i21.30570"},{"key":"ref8","article-title":"HuggingGPT: Solving AI tasks with ChatGPT and its friends in HuggingFace","author":"Shen","year":"2023"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.1055"},{"key":"ref10","article-title":"AudioPaLM: A large language model that can speak and listen","author":"Rubenstein","year":"2023"},{"key":"ref11","article-title":"X-LLM: Bootstrapping advanced large language models by treating multi-modalities as foreign languages","author":"Chen","year":"2023"},{"key":"ref12","article-title":"LLaSM: Large language and speech model","author":"Shu","year":"2023"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389705"},{"key":"ref14","article-title":"Prompting large language models with speech recognition abilities","author":"Fathullah","year":"2023"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU57964.2023.10389732"},{"key":"ref16","article-title":"Can generative large language models perform asr error correction?","author":"Ma","year":"2023"},{"key":"ref17","article-title":"Leveraging large language models for exploiting asr uncertainty","author":"Dighe","year":"2023"},{"key":"ref18","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"Proc. ICML","author":"Radford"},{"key":"ref19","article-title":"Macaw-LLM: Multi-modal language modeling with image, audio, video, and text integration","author":"Lyu","year":"2023"},{"key":"ref20","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. ICML","author":"Li"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3122291"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ASRU51503.2021.9688253"},{"key":"ref23","article-title":"Google USM: Scaling automatic speech recognition beyond 100 languages","author":"Zhang","year":"2023"},{"key":"ref24","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. NeurIPS","author":"Alayrac"},{"key":"ref25","article-title":"MiniGPT-4: Enhancing vision-language understanding with advanced large language models","author":"Zhu","year":"2023"},{"key":"ref26","article-title":"InstructBLIP: Towards general-purpose vision-language models with instruction tuning","author":"Dai","year":"2023"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"key":"ref28","article-title":"PandaGPT: One model to instruction-follow them all","author":"Su","year":"2023"},{"key":"ref29","article-title":"VideoLLM: Modeling video sequence with large language models","author":"Chen","year":"2023"},{"key":"ref30","article-title":"Video-ChatGPT: Towards detailed video understanding via large vision and language models","author":"Maaz","year":"2023"},{"key":"ref31","article-title":"Listen, think, and understand","author":"Gong","year":"2023"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/icassp48485.2024.10447027"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.5555\/3295222.3295349"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref35","article-title":"Common Voice: A massively-multilingual speech corpus","volume-title":"Proc. LREC","author":"Ardila"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1965"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1272"}],"event":{"name":"ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)","location":"Seoul, Korea, Republic of","start":{"date-parts":[[2024,4,14]]},"end":{"date-parts":[[2024,4,19]]}},"container-title":["ICASSP 2024 - 2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10445798\/10445803\/10445874.pdf?arnumber=10445874","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,8,2]],"date-time":"2024-08-02T04:37:23Z","timestamp":1722573443000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10445874\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4,14]]},"references-count":37,"URL":"https:\/\/doi.org\/10.1109\/icassp48485.2024.10445874","relation":{},"subject":[],"published":{"date-parts":[[2024,4,14]]}}}