{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T06:36:49Z","timestamp":1773729409674,"version":"3.50.1"},"reference-count":79,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,5,1]],"date-time":"2025-05-01T00:00:00Z","timestamp":1746057600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62422603"],"award-info":[{"award-number":["62422603"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62376067"],"award-info":[{"award-number":["62376067"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2025,5]]},"DOI":"10.1109\/tpami.2025.3532688","type":"journal-article","created":{"date-parts":[[2025,2,13]],"date-time":"2025-02-13T18:40:04Z","timestamp":1739472004000},"page":"3424-3439","source":"Crossref","is-referenced-by-count":29,"title":["Uni-MoE: Scaling Unified Multimodal LLMs With Mixture of Experts"],"prefix":"10.1109","volume":"47","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4819-2489","authenticated-orcid":false,"given":"Yunxin","family":"Li","sequence":"first","affiliation":[{"name":"Department of Computer Science and Technology, Harbin Institute of Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-1513-6309","authenticated-orcid":false,"given":"Shenyuan","family":"Jiang","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Technology, Harbin Institute of Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-0640-6990","authenticated-orcid":false,"given":"Baotian","family":"Hu","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Technology, Harbin Institute of Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-9062-6183","authenticated-orcid":false,"given":"Longyue","family":"Wang","sequence":"additional","affiliation":[{"name":"Alibaba Group, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0004-7770-867X","authenticated-orcid":false,"given":"Wanqi","family":"Zhong","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Technology, Harbin Institute of Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5697-4168","authenticated-orcid":false,"given":"Wenhan","family":"Luo","sequence":"additional","affiliation":[{"name":"Hong Kong University of Science and Technology, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7331-6132","authenticated-orcid":false,"given":"Lin","family":"Ma","sequence":"additional","affiliation":[{"name":"Meituan, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3895-5510","authenticated-orcid":false,"given":"Min","family":"Zhang","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Technology, Harbin Institute of Technology, Shenzhen, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3275156"},{"key":"ref2","first-page":"49250","article-title":"InstructBLIP: Towards general-purpose vision-language models with instruction tuning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Dai"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02484"},{"key":"ref4","article-title":"SEED-Bench: Benchmarking multimodal LLMs with generative comprehension","author":"Li","year":"2023"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3539597.3573023"},{"key":"ref6","article-title":"X-InstructBLIP: A framework for aligning X-modal instruction-aware representations to LLMs and emergent cross-modal reasoning","author":"Panagopoulou","year":"2023"},{"key":"ref7","article-title":"Macaw-LLM: Multi-modal language modeling with image, audio, video, and text integration","author":"Lyu","year":"2023"},{"key":"ref8","article-title":"Anymal: An efficient and scalable any-modality augmented language model","author":"Moon","year":"2023"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72952-2_19"},{"key":"ref10","article-title":"GPT-4 technical report","year":"2023"},{"key":"ref11","article-title":"Gemini: A family of highly capable multimodal models","author":"Team","year":"2023"},{"key":"ref12","first-page":"24185","article-title":"Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks","volume-title":"Proc. IEEE\/CVF Conf. Comput. Vis. Pattern Recognit.","author":"Chen"},{"key":"ref13","first-page":"71683","article-title":"Obelics: An open web-scale filtered dataset of interleaved image-text documents","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Lauren\u00e7on"},{"key":"ref14","article-title":"LLaMA 2: Open foundation and fine-tuned chat models","author":"Touvron","year":"2023"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2024.3428317"},{"key":"ref16","article-title":"A comprehensive evaluation of GPT-4V on knowledge-intensive visual question answering","author":"Li","year":"2023"},{"key":"ref17","first-page":"1","article-title":"Outrageously large neural networks: The sparsely-gated mixture-of-experts layer","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Shazeer"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2012.2200299"},{"key":"ref19","article-title":"Mixtral of experts","author":"Jiang","year":"2024"},{"key":"ref20","article-title":"MoE-LLaVA: Mixture of experts for large vision-language models","author":"Lin","year":"2024"},{"key":"ref21","first-page":"1","article-title":"Lora: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hu"},{"key":"ref22","first-page":"5232","article-title":"Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity","volume":"23","author":"Fedus","year":"2022","journal-title":"J. Mach. Learn. Res."},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3152247"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3361862"},{"key":"ref26","article-title":"Next-GPT: Any-to-any multimodal LLM","author":"Wu","year":"2023"},{"key":"ref27","article-title":"Meta-transformer: A unified framework for multimodal learning","author":"Zhang","year":"2023"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.521"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01457"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2020.3011148"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3369699"},{"key":"ref33","article-title":"Scaling instruction-finetuned language models","author":"Chung","year":"2022"},{"key":"ref34","article-title":"Llavar: Enhanced visual instruction tuning for text-rich image understanding","author":"Zhang"},{"key":"ref35","first-page":"1","article-title":"MiniGPT-4: Enhancing vision-language understanding with advanced large language models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhu"},{"key":"ref36","article-title":"Qwen-VL: A frontier large vision-language model with versatile abilities","author":"Bai","year":"2023"},{"key":"ref37","article-title":"Visual ChatGPT: Talking, drawing and editing with visual foundation models","author":"Wu","year":"2023"},{"key":"ref38","article-title":"Llama-adapter v2: Parameter-efficient visual instruction model","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Gao"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2022.3141095"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3345844"},{"key":"ref41","first-page":"1","article-title":"Learning factored representations in a deep mixture of experts","volume-title":"Proc. Int. Conf. Learn. Representations Workshop","author":"Eigen"},{"key":"ref42","article-title":"GShard: Scaling giant models with conditional computation and automatic sharding","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Lepikhin"},{"key":"ref43","first-page":"6265","article-title":"Base layers: Simplifying training of large, sparse models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Lewis"},{"key":"ref44","first-page":"5547","article-title":"GLaM: Efficient scaling of language models with mixture-of-experts","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Du"},{"key":"ref45","first-page":"49250","article-title":"Visual instruction tuning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref46","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref47","article-title":"Vicuna: An open-source chatbot impressing GPT-4 with 90%* ChatGPT quality","author":"Chiang","year":"2023"},{"key":"ref48","first-page":"28 492","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref49","first-page":"5178","article-title":"Beats: Audio pre-training with acoustic tokenizers","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Chen"},{"key":"ref50","first-page":"19730","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref51","article-title":"Phi-2: The surprising power of small language models","author":"Abdin","year":"2023"},{"key":"ref52","first-page":"1","article-title":"Openchat: Advancing open-source language models with mixed-quality data","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wang"},{"key":"ref53","first-page":"4218","article-title":"Common voice: A massively-multilingual speech corpus","volume-title":"Proc. Lang. Resour. Eval. Conf.","author":"Ardila"},{"key":"ref54","article-title":"Text to speech an AI speech feature that converts text to lifelike speech","year":"2024"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2015.7178964"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D17-1082"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/taslp.2024.3419446"},{"key":"ref58","first-page":"119","article-title":"Audiocaps: Generating captions for audios in the wild","volume-title":"Proc. Assoc. Comput. Linguistics","author":"Kim"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052990"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.23919\/EUSIPCO55093.2022.9909680"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1050"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.679"},{"key":"ref63","first-page":"146","article-title":"A-OKVQA: A benchmark for visual question answering using world knowledge","volume-title":"Proc. Eur. Conf. Comput. Vis.","author":"Schwenk"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00331"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"ref67","first-page":"190","article-title":"Collecting highly parallel data for paraphrase evaluation","volume-title":"Proc. Assoc. Comput. Linguistics","author":"Chen"},{"key":"ref68","first-page":"1","article-title":"Adam: A method for stochastic optimization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Kingma"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2023-1606"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49357.2023.10096877"},{"key":"ref71","first-page":"124","article-title":"Zero-shot video question answering via frozen bidirectional language models","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Yang"},{"key":"ref72","article-title":"VideoChat: Chat-centric video understanding","author":"Li","year":"2023"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.20"},{"key":"ref75","article-title":"MM-Vet: Evaluating large multimodal models for integrated capabilities","author":"Yu","year":"2023"},{"key":"ref76","article-title":"Shikra: Unleashing multimodal LLM\u2019s referential dialogue magic","author":"Chen","year":"2023"},{"key":"ref77","article-title":"Qwen technical report","author":"Bai","year":"2023"},{"key":"ref78","article-title":"MobileVLM: A fast, reproducible and strong vision language assistant for mobile devices","author":"Chu","year":"2023"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1145\/3688863.3689575"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/10958761\/10887014.pdf?arnumber=10887014","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,10]],"date-time":"2025-04-10T17:13:42Z","timestamp":1744305222000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10887014\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5]]},"references-count":79,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2025.3532688","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,5]]}}}