{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T19:57:28Z","timestamp":1776887848922,"version":"3.51.2"},"reference-count":67,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,6]],"date-time":"2025-12-06T00:00:00Z","timestamp":1764979200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100000761","name":"Imperial College London","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100000761","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,12,6]]},"DOI":"10.1109\/asru65441.2025.11434763","type":"proceedings-article","created":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T19:48:04Z","timestamp":1775159284000},"page":"1-8","source":"Crossref","is-referenced-by-count":2,"title":["Adaptive Audio-Visual Speech Recognition via Matryoshka-Based Multimodal LLMs"],"prefix":"10.1109","author":[{"given":"Umberto","family":"Cappellazzo","sequence":"first","affiliation":[{"name":"Imperial College,London"}]},{"given":"Minsu","family":"Kim","sequence":"additional","affiliation":[{"name":"Meta AI"}]},{"given":"Stavros","family":"Petridis","sequence":"additional","affiliation":[{"name":"Imperial College,London"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/6046.865479"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/s10489-014-0629-7"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2018.2889052"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/SLT.2018.8639643"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9414567"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2022-11311"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096889"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01801"},{"key":"ref9","article-title":"Learning audiovisual speech representation by masked multimodal cluster prediction","volume-title":"International Conference on Learning Representations","author":"Shi"},{"key":"ref10","article-title":"Jointly learning visual and auditory speech representations from raw data","volume-title":"International Conference on Learning Representations","author":"Haliassos"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10448473"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.52202\/068431-1538"},{"key":"ref13","article-title":"Unified speech recognition: A single model for auditory, visual, and audiovisual inputs","author":"Haliassos","year":"2024","journal-title":"NeurIPS"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00430"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i21.30570"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.860"},{"key":"ref17","article-title":"It\u2019s never too late: Fusing acoustic information into large language models for automatic speech recognition","author":"Chen","year":"2024","journal-title":"ICLR"},{"key":"ref18","article-title":"Large language 
models are efficient learners of noise-robust speech recognition","author":"Hu","year":"2024","journal-title":"ICLR"},{"key":"ref19","article-title":"An embarrassingly simple approach for llm with strong asr capacity","author":"Ma","year":"2024","journal-title":"arXiv preprint arXiv:2402.08846"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10445874"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP48485.2024.10447605"},{"key":"ref22","article-title":"Llama-omni: Seamless speech interaction with large language models","author":"Fang","year":"2024","journal-title":"arXiv preprint arXiv:2409.06666"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10889444"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.iwslt-1.5"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.666"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10889251"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2025-111"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.52202\/068431-2192"},{"key":"ref29","article-title":"Matformer: Nested transformer for elastic inference","author":"Kudugunta","year":"2024","journal-title":"NeurIPS"},{"key":"ref30","article-title":"Matryoshka quantization","author":"Nair","year":"2025","journal-title":"arXiv preprint arXiv:2502.06786"},{"key":"ref31","article-title":"Matryoshka multimodal models","author":"Cai","year":"2024","journal-title":"arXiv preprint arXiv:2405.17430"},{"key":"ref32","article-title":"Matryoshka query transformer for large vision-language models","author":"Hu","year":"2024","journal-title":"arXiv preprint arXiv:2405.19315"},{"key":"ref33","article-title":"The llama 3 herd of models","author":"Dubey","year":"2024","journal-title":"arXiv preprint arXiv:2407.21783"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.52202\/075280-1516"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02520"},{"key":"ref36","article-title":"Puma: Empowering unified mllm with multi-granular visual generation","author":"Fang","year":"2024","journal-title":"arXiv preprint arXiv:2410.13861"},{"key":"ref37","article-title":"Mousi: Poly-visual-expert vision-language models","author":"Fan","year":"2024","journal-title":"arXiv preprint arXiv:2401.17221"},{"key":"ref38","article-title":"Mova: Adapting mixture of vision experts to multimodal context","author":"Zong","year":"2024","journal-title":"NeurIPS"},{"key":"ref39","article-title":"Llava-mini: Efficient image and video large multimodal models with one vision token","author":"Zhang","year":"2025","journal-title":"arXiv preprint arXiv:2501.03895"},{"key":"ref40","article-title":"Meteor: Mamba-based traversal of rationale for large language and vision models","author":"Lee","year":"2024","journal-title":"NeurIPS"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.00901"},{"key":"ref42","article-title":"Cumo: Scaling multimodal llm with co-upcycled mixture-of-experts","author":"Li","year":"2024","journal-title":"NeurIPS"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.00914"},{"key":"ref44","article-title":"Dense connector for mllms","author":"Yao","year":"2024","journal-title":"NeurIPS"},{"key":"ref45","article-title":"Feast your eyes: Mixture-of-resolution adaptation for multimodal large language models","author":"Luo","year":"2024","journal-title":"arXiv preprint 
arXiv:2403.03003"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.00390"},{"key":"ref47","article-title":"Videollama 3: Frontier multimodal foundation models for image and video understanding","author":"Zhang","year":"2025","journal-title":"arXiv preprint arXiv:2501.13106"},{"key":"ref48","article-title":"Minigpt-4: Enhancing vision-language understanding with advanced large language models","author":"Zhu","year":"2023","journal-title":"arXiv preprint arXiv:2304.10592"},{"key":"ref49","first-page":"19730","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"International conference on machine learning","author":"Li"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01311"},{"key":"ref51","article-title":"Salmonn: Towards generic hearing abilities for large language models","author":"Tang","year":"2023","journal-title":"arXiv preprint arXiv:2310.13289"},{"key":"ref52","article-title":"Lora: Low-rank adaptation of large language models","author":"Hu","year":"2021","journal-title":"arXiv preprint arXiv:2106.09685"},{"key":"ref53","article-title":"Outrageously large neural networks: The sparsely-gated mixture-of-experts layer","author":"Shazeer","year":"2017","journal-title":"arXiv preprint arXiv:1701.06538"},{"issue":"120","key":"ref54","first-page":"1","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","volume":"23","author":"Fedus","year":"2022","journal-title":"Journal of Machine Learning Research"},{"key":"ref55","article-title":"St-moe: Designing stable and transferable sparse expert models","author":"Zoph","year":"2022","journal-title":"arXiv preprint arXiv:2202.08906"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.52202\/068431-0695"},{"key":"ref57","article-title":"From sparse to soft mixtures of experts","author":"Puigcerver","year":"2023","journal-title":"arXiv preprint arXiv:2308.00951"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-38"},{"key":"ref59","article-title":"Mixtral of experts","author":"Jiang","year":"2024","journal-title":"arXiv preprint arXiv:2401.04088"},{"key":"ref60","article-title":"Olmoe: Open mixture-of-experts language models","author":"Muennighoff","year":"2024","journal-title":"arXiv preprint arXiv:2409.02060"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.367"},{"key":"ref62","article-title":"Lrs3-ted: a large-scale dataset for visual speech recognition","author":"Afouras","year":"2018","journal-title":"arXiv preprint arXiv:1809.00496"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096889"},{"key":"ref64","first-page":"28492","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"International conference on machine learning","author":"Radford"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1016\/0167-6393(93)90095-3"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/WACV56688.2023.00229"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2024-322"}],"event":{"name":"2025 IEEE Automatic Speech Recognition and Understanding Workshop (ASRU)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,12,6]]},"end":{"date-parts":[[2025,12,10]]}},"container-title":["2025 IEEE Automatic Speech Recognition and Understanding Workshop 
(ASRU)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11434577\/11433836\/11434763.pdf?arnumber=11434763","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T04:59:31Z","timestamp":1775192371000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11434763\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,6]]},"references-count":67,"URL":"https:\/\/doi.org\/10.1109\/asru65441.2025.11434763","relation":{},"subject":[],"published":{"date-parts":[[2025,12,6]]}}}