{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,4]],"date-time":"2026-04-04T17:55:52Z","timestamp":1775325352702,"version":"3.50.1"},"reference-count":97,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,3,1]],"date-time":"2026-03-01T00:00:00Z","timestamp":1772323200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Research Grants Council","award":["AoE\/E-601\/22-R"],"award-info":[{"award-number":["AoE\/E-601\/22-R"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1109\/tpami.2025.3637265","type":"journal-article","created":{"date-parts":[[2025,11,26]],"date-time":"2025-11-26T19:03:54Z","timestamp":1764183834000},"page":"3530-3543","source":"Crossref","is-referenced-by-count":17,"title":["Mini-Gemini: Mining the Potential of Multi-Modality Vision Language Models"],"prefix":"10.1109","volume":"48","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-2736-132X","authenticated-orcid":false,"given":"Yanwei","family":"Li","sequence":"first","affiliation":[{"name":"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-9112-0216","authenticated-orcid":false,"given":"Yuechen","family":"Zhang","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong"}]},{"given":"Chengyao","family":"Wang","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0009-0002-7831-8636","authenticated-orcid":false,"given":"Zhisheng","family":"Zhong","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong"}]},{"given":"Yixin","family":"Chen","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9057-745X","authenticated-orcid":false,"given":"Ruihang","family":"Chu","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong"}]},{"given":"Shaoteng","family":"Liu","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, The Chinese University of Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1246-553X","authenticated-orcid":false,"given":"Jiaya","family":"Jia","sequence":"additional","affiliation":[{"name":"Department of Computer Science and Engineering, The Hong Kong University of Science and Technology, Hong 
Kong"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Chatgpt","year":"2023"},{"key":"ref2","article-title":"Opt: Open pre-trained transformer language models","author":"Zhang","year":"2022"},{"key":"ref3","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref4","article-title":"Gpt-4 technical report","year":"2023"},{"key":"ref5","article-title":"Gemini: A family of highly capable multimodal models","author":"Team","year":"2023"},{"key":"ref6","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","author":"Li","year":"2023"},{"key":"ref7","first-page":"34892","article-title":"Visual instruction tuning","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Liu","year":"2023"},{"key":"ref8","article-title":"MiniGPT-4: Enhancing vision-language understanding with advanced large language models","author":"Zhu","year":"2023"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72952-2_19"},{"key":"ref11","article-title":"Sphinx: The joint mixing of weights, tasks, and visual embeddings for multi-modal large language models","author":"Lin","year":"2023"},{"key":"ref12","article-title":"Otterhd: A high-resolution multi-modality model","author":"Li","year":"2023"},{"key":"ref13","article-title":"Llava-next: Improved reasoning, OCR, and world knowledge","author":"Liu","year":"2024"},{"key":"ref14","article-title":"Instructblip: Towards general-purpose vision-language models with instruction tuning","author":"Dai","year":"2023"},{"key":"ref15","article-title":"Introducing our multimodal models","author":"Bavishi","year":"2023"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72643-9_22"},{"key":"ref17","article-title":"Allava: Harnessing GPT4V-synthesized data for a lite vision-language model","author":"Chen","year":"2024"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-86331-9_50"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.177"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_15"},{"key":"ref22","first-page":"55006","article-title":"Lima: Less is more for alignment","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Zhou","year":"2024"},{"key":"ref23","first-page":"47669","article-title":"Openassistant conversations-democratizing large language model alignment","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"K\u00f6pf","year":"2024"},{"key":"ref24","article-title":"SDXL: Improving latent diffusion models for high-resolution image synthesis","author":"Podell","year":"2023"},{"key":"ref25","article-title":"Qwen-VL: A frontier large vision-language model with versatile abilities","author":"Bai","year":"2023"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72658-3_13"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00913"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.24818\/ida-ql\/2019.5"},{"key":"ref30","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. 
Syst.","author":"Brown","year":"2020"},{"key":"ref31","article-title":"Mixtral of experts","author":"Jiang","year":"2024"},{"key":"ref32","article-title":"Finetuned language models are zero-shot learners","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wei","year":"2022"},{"key":"ref33","first-page":"27730","article-title":"Training language models to follow instructions with human feedback","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Ouyang","year":"2022"},{"key":"ref34","article-title":"Stanford Alpaca: An instruction-following LLaMA model","author":"Taori","year":"2023"},{"key":"ref35","article-title":"Vicuna: An open-source chatbot impressing GPT-4 with 90% * chatGPT quality","author":"Chiang","year":"2023"},{"key":"ref36","article-title":"Visual ChatGPT: Talking, drawing and editing with visual foundation models","author":"Wu","year":"2023"},{"key":"ref37","article-title":"GPT4tools: Teaching large language model to use tools via self-instruction","author":"Yang","year":"2023"},{"key":"ref38","article-title":"Gemma: Introducing new state-of-the-art open models","year":"2024"},{"key":"ref39","article-title":"Microsoft COCO Captions: Data collection and evaluation server","author":"Chen","year":"2015"},{"key":"ref40","first-page":"2507","article-title":"Learn to explain: Multimodal reasoning via thought chains for science question answering","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Lu","year":"2022"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.00915"},{"key":"ref42","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"key":"ref43","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Alayrac","year":"2022"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.02484"},{"key":"ref45","article-title":"Internlm-Xcomposer: A vision-language large model for advanced text-image comprehension and composition","author":"Zhang","year":"2023"},{"key":"ref46","article-title":"Internlm-Xcomposer2: Mastering free-form text-image composition and comprehension in vision-language large model","author":"Dong","year":"2024"},{"key":"ref47","article-title":"Generative pretraining in multimodality","author":"Sun","year":"2023"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01365"},{"key":"ref49","article-title":"Planting a seed of vision in large language model","author":"Ge","year":"2023"},{"key":"ref50","article-title":"Making LLaMA see and draw with seed tokenizer","author":"Ge","year":"2023"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72920-1_22"},{"key":"ref52","article-title":"Chatillusion: Efficient-aligning interleaved generation ability with visual instruction model","author":"Chi","year":"2023"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.521"},{"issue":"3","key":"ref54","article-title":"Improving image generation with better captions","volume-title":"Comput. Sci.","volume":"2","author":"Betker","year":"2023"},{"key":"ref55","first-page":"25278","article-title":"LAION-5B: An open large-scale dataset for training next generation image-text models","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. 
Syst.","author":"Schuhmann","year":"2022"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"ref57","article-title":"Jointly training large autoregressive multimodal models","author":"Aiello","year":"2023"},{"key":"ref58","article-title":"Wuerstchen: An efficient architecture for large-scale text-to-image diffusion models","volume-title":"Proc. 12th Int. Conf. Learn. Representations","author":"Pernias","year":"2024"},{"key":"ref59","article-title":"Video generation models as world simulators","year":"2024"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58536-5_44"},{"key":"ref62","article-title":"Laion\/gpt4v-dataset","author":"eV","year":"2023"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00592"},{"key":"ref64","first-page":"1143","article-title":"Im2text: Describing images using 1 million captioned photographs","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Ordonez","year":"2011"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.905"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00686"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1145\/2812802"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2019.00156"},{"key":"ref72","article-title":"Stable-diffusion-prompts","year":"2023"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00851"},{"key":"ref74","article-title":"Awesome multilingual OCR toolkits based on paddlepaddle","year":"2020"},{"key":"ref75","article-title":"MobileVLM: A fast, reproducible and strong vision language assistant for mobile devices","author":"Chu","year":"2023"},{"key":"ref76","article-title":"Shikra: Unleashing multimodal LLM\u2019s referential dialogue magic","author":"Chen","year":"2023"},{"key":"ref77","article-title":"Introducing idefics: An open reproduction of state-of-the-art visual language model","year":"2023"},{"key":"ref78","article-title":"CogVLM: Visual expert for pretrained language models","author":"Wang","year":"2023"},{"key":"ref79","article-title":"MME: A comprehensive evaluation benchmark for multimodal large language models","author":"Fu","year":"2023"},{"key":"ref80","article-title":"MM-Vet: Evaluating large multimodal models for integrated capabilities","author":"Yu","year":"2023"},{"key":"ref81","article-title":"Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Lu","year":"2024"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.20"},{"key":"ref83","article-title":"Seed-bench: Benchmarking multimodal LLMs with generative comprehension","author":"Li","year":"2023"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1007\/s11432-024-4235-6"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00225"},{"key":"ref86","article-title":"Q-Bench: A benchmark for general-purpose foundation models on low-level vision","author":"Wu","year":"2023"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01363"},{"key":"ref88","article-title":"Wise: A world knowledge-informed semantic evaluation for text-to-image generation","author":"Niu","year":"2025"},{"key":"ref89","article-title":"VILA-U: A unified foundation model integrating visual understanding and generation","author":"Wu","year":"2024"},{"key":"ref90","article-title":"Emu 3: Next-token prediction is all you need","author":"Wang","year":"2024"},{"key":"ref91","article-title":"Show-O: One single transformer to unify multimodal understanding and generation","author":"Xie","year":"2024"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.01210"},{"key":"ref93","first-page":"12606","article-title":"Scaling rectified flow transformers for high-resolution image synthesis","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Esser","year":"2024"},{"key":"ref94","article-title":"Flux","author":"Labs","year":"2025"},{"key":"ref95","article-title":"Transfer between modalities with metaqueries","author":"Pan","year":"2025"},{"key":"ref96","article-title":"Qwen2.5-VL technical report","author":"Bai","year":"2025"},{"key":"ref97","first-page":"36479","article-title":"Photorealistic text-to-image diffusion models with deep language understanding","volume-title":"Proc. Int. Conf. Adv. Neural Inf. Process. Syst.","author":"Saharia","year":"2022"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/11372200\/11269745.pdf?arnumber=11269745","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,9]],"date-time":"2026-02-09T21:05:56Z","timestamp":1770671156000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11269745\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3]]},"references-count":97,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2025.3637265","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3]]}}}