{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,2]],"date-time":"2026-04-02T23:37:56Z","timestamp":1775173076767,"version":"3.50.1"},"reference-count":106,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"3","license":[{"start":{"date-parts":[[2025,3,1]],"date-time":"2025-03-01T00:00:00Z","timestamp":1740787200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"National Key R&#x0026;D Program of China","award":["2022ZD0161000"],"award-info":[{"award-number":["2022ZD0161000"]}]},{"name":"General Research Fund of Hong Kong","award":["17200622"],"award-info":[{"award-number":["17200622"]}]},{"name":"General Research Fund of Hong Kong","award":["17209324"],"award-info":[{"award-number":["17209324"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2025,3]]},"DOI":"10.1109\/tpami.2024.3507000","type":"journal-article","created":{"date-parts":[[2024,11,27]],"date-time":"2024-11-27T19:33:44Z","timestamp":1732736024000},"page":"1877-1893","source":"Crossref","is-referenced-by-count":39,"title":["LVLM-EHub: A Comprehensive Evaluation Benchmark for Large Vision-Language Models"],"prefix":"10.1109","volume":"47","author":[{"ORCID":"https:\/\/orcid.org\/0009-0008-8742-2420","authenticated-orcid":false,"given":"Peng","family":"Xu","sequence":"first","affiliation":[{"name":"OpenGVLab, Shanghai AI Laboratory, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3781-4086","authenticated-orcid":false,"given":"Wenqi","family":"Shao","sequence":"additional","affiliation":[{"name":"OpenGVLab, Shanghai AI Laboratory, Shanghai, China"}]},{"given":"Kaipeng","family":"Zhang","sequence":"additional","affiliation":[{"name":"OpenGVLab, Shanghai AI Laboratory, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-7881-712X","authenticated-orcid":false,"given":"Peng","family":"Gao","sequence":"additional","affiliation":[{"name":"OpenGVLab, Shanghai AI Laboratory, Shanghai, China"}]},{"given":"Shuo","family":"Liu","sequence":"additional","affiliation":[{"name":"OpenGVLab, Shanghai AI Laboratory, Shanghai, China"}]},{"given":"Meng","family":"Lei","sequence":"additional","affiliation":[{"name":"OpenGVLab, Shanghai AI Laboratory, Shanghai, China"}]},{"given":"Fanqing","family":"Meng","sequence":"additional","affiliation":[{"name":"OpenGVLab, Shanghai AI Laboratory, Shanghai, China"}]},{"given":"Siyuan","family":"Huang","sequence":"additional","affiliation":[{"name":"OpenGVLab, Shanghai AI Laboratory, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1889-2567","authenticated-orcid":false,"given":"Yu","family":"Qiao","sequence":"additional","affiliation":[{"name":"OpenGVLab, Shanghai AI Laboratory, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6685-7950","authenticated-orcid":false,"given":"Ping","family":"Luo","sequence":"additional","affiliation":[{"name":"OpenGVLab, Shanghai AI Laboratory, Shanghai, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"LLaMA: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref2","article-title":"Language models are few-shot learners","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Brown"},{"key":"ref3","article-title":"Vicuna: An open-source chatbot impressing GPT-4 with 90% ChatGPT quality","author":"Chiang","year":"2023"},{"key":"ref4","article-title":"GPT-4 technical report","year":"2023"},{"key":"ref5","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","author":"Li","year":"2023"},{"key":"ref6","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Alayrac"},{"key":"ref7","article-title":"LLaVA-med: Training a large language-and-vision assistant for biomedicine in one day","author":"Li","year":"2023"},{"key":"ref8","article-title":"LLaMA-adapter v2: Parameter-efficient visual instruction model","author":"Gao","year":"2023"},{"key":"ref9","article-title":"Transfer visual prompt generator across LLMs","author":"Zhang","year":"2023"},{"key":"ref10","article-title":"Visual instruction tuning","author":"Liu","year":"2023"},{"key":"ref11","article-title":"MiniGPT-4: Enhancing vision-language understanding with advanced large language models","author":"Zhu","year":"2023"},{"key":"ref12","article-title":"mPLUG-owl: Modularization empowers large language models with multimodality","author":"Ye","year":"2023"},{"key":"ref13","article-title":"Otter: A multi-modal model with in-context instruction tuning","author":"Li","year":"2023"},{"key":"ref14","article-title":"InstructBLIP: Towards general-purpose vision-language models with instruction tuning","author":"Dai","year":"2023"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.20"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.133"},{"key":"ref17","article-title":"On the hidden mystery of OCR in large multimodal models","author":"Liu","year":"2023"},{"key":"ref18","article-title":"What makes for good visual tokenizers for large language models?","author":"Wang","year":"2023"},{"key":"ref19","article-title":"MME: A comprehensive evaluation benchmark for multimodal large language models","author":"Fu","year":"2023"},{"key":"ref20","article-title":"MMBench: Is your multi-modal model an all-around player?","author":"Liu","year":"2023"},{"key":"ref21","article-title":"MM-vet: Evaluating large multimodal models for integrated capabilities","author":"Yu","year":"2023"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.findings-emnlp.755"},{"key":"ref23","article-title":"Introducing ChatGPT","year":"2022"},{"key":"ref24","article-title":"Microsoft COCO captions: Data collection and evaluation server","author":"Chen","year":"2015"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00356"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref28","article-title":"COYO-700M: Image-text pair dataset","author":"Byeon","year":"2022"},{"key":"ref29","article-title":"LAION-400M: Open dataset of CLIP-filtered 400 million image-text pairs","author":"Schuhmann","year":"2021"},{"key":"ref30","article-title":"LAION COCO: 600 m synthetic captions from LAION2B-en","author":"Schuhmann","year":"2022"},{"key":"ref31","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Li"},{"key":"ref32","first-page":"1143","article-title":"Im2Text: Describing images using 1 million captioned photographs","volume-title":"Proc. Adv. neural Inf. Process. Syst.","author":"Ordonez"},{"key":"ref33","article-title":"What matters in training a GPT4-style language model with multimodal inputs?","author":"Zeng","year":"2023"},{"key":"ref34","article-title":"Visual ChatGPT: Talking, drawing and editing with visual foundation models","author":"Wu","year":"2023"},{"key":"ref35","article-title":"Fuyu-8B: A multimodal architecture for AI agents","year":"2023"},{"key":"ref36","article-title":"Gemini","author":"DeepMind","year":"2023"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1145\/1866029.1866080"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00851"},{"key":"ref39","article-title":"SEED-bench: Benchmarking multimodal LLMs with generative comprehension","author":"Li","year":"2023"},{"key":"ref40","article-title":"MMMU: A massive multi-discipline multimodal understanding and reasoning benchmark for expert AGI","author":"Yue","year":"2023"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/WACV48630.2021.00225"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2019.00156"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00686"},{"key":"ref44","first-page":"1","article-title":"Measuring massive multitask language understanding","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hendrycks"},{"key":"ref45","first-page":"632","article-title":"A large annotated corpus for learning natural language inference","volume-title":"Proc. 2015 Conf. Empir. Methods Natural Lang. Process.","author":"Bowman"},{"key":"ref46","first-page":"353","article-title":"GLUE: A multi-task benchmark and analysis platform for natural language understanding","volume-title":"Proc. 2018 EMNLP Workshop BlackboxNLP: Analyzing Interpreting Neural Netw. NLP","author":"Wang"},{"key":"ref47","first-page":"1112","article-title":"A broad-coverage challenge corpus for sentence understanding through inference","volume-title":"Proc. 2018 Conf. North Amer. Chapter Assoc. Comput. Linguistics: Hum. Lang. Technol.","author":"Williams"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6248074"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.350"},{"key":"ref52","first-page":"2048","article-title":"Leveraging procedural generation to benchmark reinforcement learning","volume-title":"Proc. 37th Int. Conf. Mach. Learn.","author":"Cobbe"},{"key":"ref53","first-page":"1835","article-title":"FinRL-Meta: Data-driven deep reinforcementlearning in quantitative finance","volume-title":"Proc. Data-Centric AI Workshop","author":"Liu"},{"key":"ref54","first-page":"1","article-title":"Avalon: A benchmark for RL generalization using procedurally generated worlds","volume-title":"Proc. 36th Conf. Neural Inf. Process. Syst. Datasets Benchmarks Track","author":"Albrecht"},{"key":"ref55","first-page":"1","article-title":"Challenges and opportunities in offline reinforcement learning from visual observations","volume-title":"Proc. Decis. Awareness Reinforcement Learn. 
Workshop ICML","author":"Lu"},{"key":"ref56","article-title":"GPT4Tools: Teaching LLM to use tools via self-instruction","author":"Yang","year":"2023"},{"key":"ref57","article-title":"MiniGPT-v2: Large language model as a unified interface for vision-language multi-task learning","author":"Chen","year":"2023"},{"key":"ref58","article-title":"Intel-LLaVA","year":"2024"},{"key":"ref59","article-title":"InternVL2","year":"2024"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i3.27999"},{"key":"ref61","article-title":"Fine-tuning multimodal LLMs to follow zero-shot demonstrative instructions","author":"Li","year":"2023"},{"key":"ref62","article-title":"OpenCompass: A universal evaluation platform for foundation models","year":"2023"},{"issue":"4","key":"ref63","article-title":"Learning multiple layers of features from tiny images","volume-title":"Proc. Handbook Systemic Autoimmune Dis.","volume":"1","author":"Krizhevsky"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6248092"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/ICVGIP.2008.47"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6247990"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2013.221"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2015.7333942"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2017.157"},{"issue":"18","key":"ref70","first-page":"8027","article-title":"A robust arbitrary text detection system for natural scene images","volume-title":"Expert Syst. Appl.","volume":"41","author":"Risnumawan","year":"2014"},{"issue":"9","key":"ref71","first-page":"2853","article-title":"End-to-end scene text recognition using tree-structured models","volume-title":"Pattern Recognit.","volume":"47","author":"Shi","year":"2014"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.76"},{"key":"ref73","article-title":"COCO-text: Dataset and benchmark for text detection and recognition in natural images","author":"Veit","year":"2016"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19815-1_18"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2019.02.002"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01393"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2019.00244"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/ICDARW.2019.10029"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1109\/ICDAR.2019.00251"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00331"},{"issue":"4","key":"ref81","article-title":"IconQA: A new benchmark for abstract diagram understanding and visual language reasoning","volume-title":"Proc. 35th Conf. Neural Inf. Process. Syst. (NeurIPS) Track Datasets Benchmarks","volume":"1","author":"Lu"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00566"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00247"},{"key":"ref84","first-page":"2507","article-title":"Learn to explain: Multimodal reasoning via thought chains for science question answering","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Lu"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00688"},{"key":"ref86","article-title":"FastChat","author":"Zheng","year":"2023"},{"key":"ref87","article-title":"Symbolic discovery of optimization algorithms","volume-title":"Proc. Int. Conf. 
Neural Inf. Process. Syst.","author":"Chen"},{"key":"ref88","article-title":"Reduction of class activation uncertainty with background information","author":"Kabir","year":"2023"},{"key":"ref89","article-title":"DINOv2: Learning robust visual features without supervision","author":"Oquab","year":"2023"},{"key":"ref90","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3244340"},{"key":"ref91","doi-asserted-by":"publisher","DOI":"10.1109\/tip.2024.3512354"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19815-1_11"},{"key":"ref93","article-title":"CentripetalText: An efficient text instance representation for scene text detection","volume-title":"Proc. 35th Conf. Neural Inf. Process. Syst.","author":"Sheng"},{"key":"ref94","first-page":"2579","article-title":"LayoutLMv2: Multi-modal pre-training for visually-rich document understanding","volume-title":"Proc. Annu. Meeting Assoc. Comput. Linguistics, Int. Joint Conf. Natural Lang. Process.","author":"Xu"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00685"},{"key":"ref96","article-title":"PaLI: A jointly-scaled multilingual language-image model","author":"Chen","year":"2023"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00502"},{"key":"ref98","article-title":"Visual instruction tuning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref99","article-title":"VQA-GNN: Reasoning with multimodal semantic graph for visual question answering","author":"Wang","year":"2022"},{"issue":"8","key":"ref100","first-page":"242","article-title":"The proposed USCF rating system. its development, theory, and applications","volume":"22","author":"Elo","year":"1967","journal-title":"Chess Life"},{"key":"ref101","article-title":"Judging LLM-as-a-judge with MT-bench and chatbot arena","author":"Zheng","year":"2023"},{"key":"ref102","article-title":"Training a helpful and harmless assistant with reinforcement learning from human feedback","author":"Bai","year":"2022"},{"key":"ref103","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"ref105","article-title":"Scaling instruction-finetuned language models","author":"Chung","year":"2022"},{"key":"ref106","article-title":"Instruction tuning with GPT-4","author":"Peng","year":"2023"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/10873290\/10769058.pdf?arnumber=10769058","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,2,6]],"date-time":"2025-02-06T06:04:47Z","timestamp":1738821887000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10769058\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3]]},"references-count":106,"journal-issue":{"issue":"3"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2024.3507000","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3]]}}}
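
The record above is a Crossref REST API "work" message; the same JSON can be retrieved live from the public works endpoint using the DOI it contains. The following is a minimal sketch of fetching and summarizing it with only the Python standard library; the endpoint URL pattern is the standard Crossref REST API, the field names ("message", "title", "author", "container-title", "page", "reference-count") appear in the record itself, and everything else (variable names, the printed summary format) is illustrative rather than part of the record.

# Minimal sketch: fetch the Crossref work record shown above and print a summary.
# Assumes network access; Crossref etiquette suggests identifying yourself via a
# User-Agent with a mailto address for the "polite" pool (optional, omitted here).
import json
import urllib.request

DOI = "10.1109/tpami.2024.3507000"  # DOI taken from the record above
URL = f"https://api.crossref.org/works/{DOI}"  # Crossref REST API works endpoint

with urllib.request.urlopen(URL) as resp:
    record = json.load(resp)

msg = record["message"]  # the "message" object carries the work metadata
print("Title:  ", msg["title"][0])          # "title" is a list of strings
print("Venue:  ", msg["container-title"][0])
print("Pages:  ", msg.get("page", "n/a"))
print("Refs:   ", msg["reference-count"])
# "author" entries hold "given"/"family" name parts (some also carry an ORCID).
authors = ", ".join(
    f'{a.get("given", "")} {a.get("family", "")}'.strip()
    for a in msg.get("author", [])
)
print("Authors:", authors)

Run as-is, this would print the title "LVLM-EHub: A Comprehensive Evaluation Benchmark for Large Vision-Language Models", the IEEE TPAMI venue, pages 1877-1893, the 106-entry reference count, and the ten-author list recorded above.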