{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,17]],"date-time":"2026-04-17T15:51:16Z","timestamp":1776441076139,"version":"3.51.2"},"publisher-location":"New York, NY, USA","reference-count":30,"publisher":"ACM","license":[{"start":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T00:00:00Z","timestamp":1730073600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.acm.org\/publications\/policies\/copyright_policy#Background"}],"funder":[{"name":"China Postdoctoral Science Fund","award":["2024M751559"],"award-info":[{"award-number":["2024M751559"]}]},{"name":"National Key R&D Program of China","award":["2022ZD0161600"],"award-info":[{"award-number":["2022ZD0161600"]}]},{"name":"Shanghai Postdoctoral Excellence Program","award":["2023023"],"award-info":[{"award-number":["2023023"]}]}],"content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2024,10,28]]},"DOI":"10.1145\/3664647.3685520","type":"proceedings-article","created":{"date-parts":[[2024,10,26]],"date-time":"2024-10-26T06:59:49Z","timestamp":1729925989000},"page":"11198-11201","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":46,"title":["VLMEvalKit: An Open-Source ToolKit for Evaluating Large Multi-Modality Models"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3052-4177","authenticated-orcid":false,"given":"Haodong","family":"Duan","sequence":"first","affiliation":[{"name":"Shanghai AI Laboratory, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4261-6271","authenticated-orcid":false,"given":"Junming","family":"Yang","sequence":"additional","affiliation":[{"name":"Southeast University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-4402-3650","authenticated-orcid":false,"given":"Yuxuan","family":"Qiao","sequence":"additional","affiliation":[{"name":"Nanjing University, Nanjing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-3764-1266","authenticated-orcid":false,"given":"Xinyu","family":"Fang","sequence":"additional","affiliation":[{"name":"Zhejiang University, Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5935-3877","authenticated-orcid":false,"given":"Lin","family":"Chen","sequence":"additional","affiliation":[{"name":"University of Science and Technology of China, Hefei, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1386-6626","authenticated-orcid":false,"given":"Yuan","family":"Liu","sequence":"additional","affiliation":[{"name":"WeChat AI, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4654-835X","authenticated-orcid":false,"given":"Xiaoyi","family":"Dong","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Hong Kong, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1110-5062","authenticated-orcid":false,"given":"Yuhang","family":"Zang","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2539-8815","authenticated-orcid":false,"given":"Pan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4153-7352","authenticated-orcid":false,"given":"Jiaqi","family":"Wang","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory, Shanghai, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8865-7896","authenticated-orcid":false,"given":"Dahua","family":"Lin","sequence":"additional","affiliation":[{"name":"The Chinese University of Hong Kong, Hong Kong, Hong Kong"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6820-2325","authenticated-orcid":false,"given":"Kai","family":"Chen","sequence":"additional","affiliation":[{"name":"Shanghai AI Laboratory, Shanghai, China"}]}],"member":"320","published-online":{"date-parts":[[2024,10,28]]},"reference":[{"key":"e_1_3_2_2_1_1","unstructured":"Anthropic. 2024. The claude 3 model family: Opus sonnet haiku. (2024)."},{"key":"e_1_3_2_2_2_1","volume-title":"ShareGPT4V: Improving Large Multi-Modal Models with Better Captions. arXiv:2311.12793","author":"Chen Lin","year":"2023","unstructured":"Lin Chen, Jisong Li, Xiaoyi Dong, Pan Zhang, Conghui He, Jiaqi Wang, Feng Zhao, and Dahua Lin. 2023. ShareGPT4V: Improving Large Multi-Modal Models with Better Captions. arXiv:2311.12793 (2023)."},{"key":"e_1_3_2_2_3_1","unstructured":"Lin Chen Jinsong Li Xiaoyi Dong Pan Zhang Yuhang Zang Zehui Chen Haodong Duan Jiaqi Wang et al. 2024. Are We on the Right Way for Evaluating Large Vision-Language Models? arXiv:2403.20330 (2024)."},{"key":"e_1_3_2_2_4_1","unstructured":"Lin Chen Xilin Wei Jinsong Li Xiaoyi Dong Pan Zhang Yuhang Zang Zehui Chen Haodong Duan Bin Lin Zhenyu Tang et al. 2024. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325 (2024)."},{"key":"e_1_3_2_2_5_1","unstructured":"Zhe Chen Weiyun Wang Hao Tian Shenglong Ye Zhangwei Gao Erfei Cui Wenwen Tong Kongzhi Hu Jiapeng Luo Zheng Ma Ji Ma Jiaqi Wang Xiaoyi Dong Hang Yan et al. 2024. How Far AreWe to GPT-4V? Closing the Gap to Commercial Multimodal Models with Open-Source Suites. arXiv:2404.16821 [cs.CV]"},{"key":"e_1_3_2_2_6_1","volume-title":"Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv:2311.07919","author":"Chu Yunfei","year":"2023","unstructured":"Yunfei Chu, Jin Xu, Xiaohuan Zhou, Qian Yang, Shiliang Zhang, Zhijie Yan, et al. 2023. Qwen-audio: Advancing universal audio understanding via unified large-scale audio-language models. arXiv:2311.07919 (2023)."},{"key":"e_1_3_2_2_7_1","unstructured":"Xiaoyi Dong Pan Zhang Yuhang Zang Yuhang Cao Bin Wang Linke Ouyang Xilin Wei Songyang Zhang Haodong Duan Maosong Cao Wenwei Zhang et al. 2024. InternLM-XComposer2: Mastering Free-form Text-Image Composition and Comprehension in Vision-Language Large Model. arXiv:2401.16420 (2024)."},{"key":"e_1_3_2_2_8_1","volume-title":"GLM: General Language Model Pretraining with Autoregressive Blank Infilling. In ACL. 320--335.","author":"Du Zhengxiao","year":"2022","unstructured":"Zhengxiao Du, Yujie Qian, Xiao Liu, Ming Ding, Jiezhong Qiu, Zhilin Yang, and Jie Tang. 2022. GLM: General Language Model Pretraining with Autoregressive Blank Infilling. In ACL. 320--335."},{"key":"e_1_3_2_2_9_1","volume-title":"MMBench-Video: A Long-Form Multi-Shot Benchmark for Holistic Video Understanding. arXiv:2406.14515","author":"Fang Xinyu","year":"2024","unstructured":"Xinyu Fang, Kangrui Mao, Haodong Duan, Xiangyu Zhao, Yining Li, Dahua Lin, and Kai Chen. 2024. MMBench-Video: A Long-Form Multi-Shot Benchmark for Holistic Video Understanding. arXiv:2406.14515 (2024)."},{"key":"e_1_3_2_2_10_1","volume-title":"Onellm: One framework to align all modalities with language. In CVPR. 
26584--26595.","author":"Han Jiaming","year":"2024","unstructured":"Jiaming Han, Kaixiong Gong, Yiyuan Zhang, Jiaqi Wang, Kaipeng Zhang, Dahua Lin, Yu Qiao, Peng Gao, and Xiangyu Yue. 2024. Onellm: One framework to align all modalities with language. In CVPR. 26584--26595."},{"key":"e_1_3_2_2_11_1","volume-title":"A diagram is worth a dozen images","author":"Kembhavi Aniruddha","unstructured":"Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. 2016. A diagram is worth a dozen images. In ECCV. Springer, 235--251."},{"key":"e_1_3_2_2_12_1","volume-title":"Mvbench: A comprehensive multi-modal video understanding benchmark. In CVPR. 22195--22206.","author":"Li Kunchang","year":"2024","unstructured":"Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. 2024. Mvbench: A comprehensive multi-modal video understanding benchmark. In CVPR. 22195--22206."},{"key":"e_1_3_2_2_13_1","volume-title":"Hallusionbench: You see what you think? or you think what you see? an imagecontext reasoning benchmark challenging for gpt-4v (ision), llava-1.5, and other multi-modality models. arXiv:2310.14566","author":"Liu Fuxiao","year":"2023","unstructured":"Fuxiao Liu, Tianrui Guan, Zongxia Li, Lichang Chen, Yaser Yacoob, et al. 2023. Hallusionbench: You see what you think? or you think what you see? an imagecontext reasoning benchmark challenging for gpt-4v (ision), llava-1.5, and other multi-modality models. arXiv:2310.14566 (2023)."},{"key":"e_1_3_2_2_14_1","unstructured":"Haotian Liu Chunyuan Li Yuheng Li Bo Li Yuanhan Zhang Sheng Shen and Yong Jae Lee. 2024. LLaVA-NeXT: Improved reasoning OCR and world knowledge. https:\/\/llava-vl.github.io\/blog\/2024-01-30-llava-next\/"},{"key":"e_1_3_2_2_15_1","volume-title":"Visual instruction tuning. arXiv:2304.08485","author":"Liu Haotian","year":"2023","unstructured":"Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. 2023. Visual instruction tuning. arXiv:2304.08485 (2023)."},{"key":"e_1_3_2_2_16_1","doi-asserted-by":"crossref","unstructured":"Yuan Liu Haodong Duan Yuanhan Zhang Bo Li Songyang Zhang Wangbo Zhao Yike Yuan Jiaqi Wang Conghui He et al. 2023. MMBench: Is your multi-modal model an all-around player? arXiv:2307.06281 (2023).","DOI":"10.1007\/978-3-031-72658-3_13"},{"key":"e_1_3_2_2_17_1","unstructured":"Yuliang Liu Zhang Li Hongliang Li Wenwen Yu Mingxin Huang Dezhi Peng Mingyu Liu Mingrui Chen Chunyuan Li Lianwen Jin et al. 2023. On the hidden mystery of ocr in large multimodal models. arXiv:2305.07895 (2023)."},{"key":"e_1_3_2_2_18_1","volume-title":"Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv:2310.02255","author":"Lu Pan","year":"2023","unstructured":"Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. 2023. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. arXiv:2310.02255 (2023)."},{"key":"e_1_3_2_2_19_1","volume-title":"Ajeet Kumar Singh, and Anirban Chakraborty","author":"Mishra Anand","year":"2019","unstructured":"Anand Mishra, Shashank Shekhar, Ajeet Kumar Singh, and Anirban Chakraborty. 2019. Ocr-vqa: Visual question answering by reading text in images. In ICDAR. IEEE, 947--952."},{"key":"e_1_3_2_2_20_1","unstructured":"OpenAI. 2023. ChatGPT. 
https:\/\/openai.com\/blog\/chatgpt."},{"key":"e_1_3_2_2_22_1","volume-title":"Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach.","author":"Singh Amanpreet","year":"2019","unstructured":"Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. 2019. Towards vqa models that can read. In CVPR. 8317--8326."},{"key":"e_1_3_2_2_23_1","volume-title":"Moviechat: From dense token to sparse memory for long video understanding. In CVPR. 18221--18232.","author":"Song Enxin","year":"2024","unstructured":"Enxin Song, Wenhao Chai, Guanhong Wang, Yucheng Zhang, Haoyang Zhou, Feiyang Wu, Haozhe Chi, Xun Guo, et al. 2024. Moviechat: From dense token to sparse memory for long video understanding. In CVPR. 18221--18232."},{"key":"e_1_3_2_2_24_1","unstructured":"Gemini Team Rohan Anil Sebastian Borgeaud Yonghui Wu Jean-Baptiste Alayrac Jiahui Yu Radu Soricut Johan Schalkwyk et al. 2023. Gemini: a family of highly capable multimodal models. arXiv:2312.11805 (2023)."},{"key":"e_1_3_2_2_25_1","unstructured":"InternLM Team. 2023. InternLM: A Multilingual Language Model with Progressively Enhanced Capabilities. https:\/\/github.com\/InternLM\/InternLMtechreport."},{"key":"e_1_3_2_2_26_1","volume-title":"Llama: Open and efficient foundation language models. arXiv:2302.13971","author":"Touvron Hugo","year":"2023","unstructured":"Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timoth\u00e9e Lacroix, Baptiste Rozi\u00e8re, Naman Goyal, Eric Hambro, Faisal Azhar, et al. 2023. Llama: Open and efficient foundation language models. arXiv:2302.13971 (2023)."},{"key":"e_1_3_2_2_27_1","volume-title":"Pointllm: Empowering large language models to understand point clouds. arXiv:2308.16911","author":"Xu Runsen","year":"2023","unstructured":"Runsen Xu, Xiaolong Wang, Tai Wang, Yilun Chen, Jiangmiao Pang, and Dahua Lin. 2023. Pointllm: Empowering large language models to understand point clouds. arXiv:2308.16911 (2023)."},{"key":"e_1_3_2_2_28_1","doi-asserted-by":"crossref","unstructured":"Le Xue Ning Yu Shu Zhang Artemis Panagopoulou Junnan Li Roberto Mart\u00edn-Mart\u00edn Jiajun Wu Caiming Xiong Ran Xu et al. 2024. Ulip-2: Towards scalable multimodal pre-training for 3d understanding. In CVPR. 27091--27101.","DOI":"10.1109\/CVPR52733.2024.02558"},{"key":"e_1_3_2_2_29_1","unstructured":"Qinghao Ye Haiyang Xu Jiabo Ye Ming Yan Anwen Hu Haowei Liu Qi Qian Ji Zhang et al. 2023. mPLUG-Owl2: Revolutionizing Multi-modal Large Language Model with Modality Collaboration. arXiv:2311.04257 [cs.CL]"},{"key":"e_1_3_2_2_30_1","volume-title":"Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv:2308.02490","author":"Yu Weihao","year":"2023","unstructured":"Weihao Yu, Zhengyuan Yang, Linjie Li, Jianfeng Wang, Kevin Lin, Zicheng Liu, Xinchao Wang, and Lijuan Wang. 2023. Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv:2308.02490 (2023)."},{"key":"e_1_3_2_2_31_1","volume-title":"Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. arXiv:2311.16502","author":"Yue Xiang","year":"2023","unstructured":"Xiang Yue, Yuansheng Ni, Kai Zhang, Tianyu Zheng, Ruoqi Liu, Ge Zhang, Samuel Stevens, Dongfu Jiang, Weiming Ren, Yuxuan Sun, et al. 2023. Mmmu: A massive multi-discipline multimodal understanding and reasoning benchmark for expert agi. 
arXiv:2311.16502 (2023)."}],"event":{"name":"MM '24: The 32nd ACM International Conference on Multimedia","location":"Melbourne VIC Australia","acronym":"MM '24","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 32nd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3685520","content-type":"unspecified","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3664647.3685520","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,19]],"date-time":"2025-06-19T01:17:28Z","timestamp":1750295848000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3664647.3685520"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,10,28]]},"references-count":30,"alternative-id":["10.1145\/3664647.3685520","10.1145\/3664647"],"URL":"https:\/\/doi.org\/10.1145\/3664647.3685520","relation":{},"subject":[],"published":{"date-parts":[[2024,10,28]]},"assertion":[{"value":"2024-10-28","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}