{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,24]],"date-time":"2026-02-24T18:39:54Z","timestamp":1771958394694,"version":"3.50.1"},"publisher-location":"New York, NY, USA","reference-count":72,"publisher":"ACM","content-domain":{"domain":["dl.acm.org"],"crossmark-restriction":true},"short-container-title":[],"published-print":{"date-parts":[[2025,10,27]]},"DOI":"10.1145\/3746027.3755363","type":"proceedings-article","created":{"date-parts":[[2025,10,25]],"date-time":"2025-10-25T06:54:15Z","timestamp":1761375255000},"page":"10994-11003","update-policy":"https:\/\/doi.org\/10.1145\/crossmark-policy","source":"Crossref","is-referenced-by-count":1,"title":["Mavors: Multi-granularity Video Representation for Multimodal Large Language Model"],"prefix":"10.1145","author":[{"ORCID":"https:\/\/orcid.org\/0009-0003-9241-236X","authenticated-orcid":false,"given":"Yang","family":"Shi","sequence":"first","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5183-8538","authenticated-orcid":false,"given":"Jiaheng","family":"Liu","sequence":"additional","affiliation":[{"name":"Nanjing University, Suzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5258-2397","authenticated-orcid":false,"given":"Yushuo","family":"Guan","sequence":"additional","affiliation":[{"name":"Kling Team, Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-4313-2192","authenticated-orcid":false,"given":"Zhenhua","family":"Wu","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1460-8124","authenticated-orcid":false,"given":"Yuanxing","family":"Zhang","sequence":"additional","affiliation":[{"name":"Kling Team, Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1869-113X","authenticated-orcid":false,"given":"Zihao","family":"Wang","sequence":"additional","affiliation":[{"name":"Kling Team, Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2440-6585","authenticated-orcid":false,"given":"Weihong","family":"Lin","sequence":"additional","affiliation":[{"name":"Kling Team, Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-6468-6697","authenticated-orcid":false,"given":"Jingyun","family":"Hua","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-9128-4254","authenticated-orcid":false,"given":"Zekun","family":"Wang","sequence":"additional","affiliation":[{"name":"Kling Team, Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-7146-9782","authenticated-orcid":false,"given":"Xinlong","family":"Chen","sequence":"additional","affiliation":[{"name":"Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0009-0999-6231","authenticated-orcid":false,"given":"Bohan","family":"Zeng","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7532-5550","authenticated-orcid":false,"given":"Wentao","family":"Zhang","sequence":"additional","affiliation":[{"name":"Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6079-6392","authenticated-orcid":false,"given":"Fuzheng","family":"Zhang","sequence":"additional","affiliation":[{"name":"Kuaishou Technology, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6997-0406","authenticated-orcid":false,"given":"Wenjing","family":"Yang","sequence":"additional","affiliation":[{"name":"Researcher, Changsha, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-5475-2728","authenticated-orcid":false,"given":"Di","family":"Zhang","sequence":"additional","affiliation":[{"name":"Kling Team, Kuaishou Technology, Beijing, China"}]}],"member":"320","published-online":{"date-parts":[[2025,10,27]]},"reference":[{"key":"e_1_3_2_1_1_1","volume-title":"Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022","author":"Alayrac Jean-Baptiste","year":"2022","unstructured":"Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, Roman Ring, Eliza Rutherford, Serkan Cabi, Tengda Han, Zhitao Gong, Sina Samangooei, Marianne Monteiro, Jacob L. Menick, Sebastian Borgeaud, Andy Brock, Aida Nematzadeh, Sahand Sharifzadeh, Mikolaj Binkowski, Ricardo Barreira, Oriol Vinyals, Andrew Zisserman, and Kar\u00e9n Simonyan. 2022. Flamingo: a Visual Language Model for Few-Shot Learning. In Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (Eds.)."},{"key":"e_1_3_2_1_2_1","volume-title":"arXiv preprint arXiv:2502.13923","author":"Bai Shuai","year":"2025","unstructured":"Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. 2025. Qwen2.5-VL Technical Report. arXiv preprint arXiv:2502.13923 (2025)."},{"key":"e_1_3_2_1_3_1","unstructured":"Rohan Bavishi Erich Elsen Curtis Hawthorne Maxwell Nye Augustus Odena Arushi Somani and Sa\u011fnak Ta\u015firlar. 2023. Introducing our Multimodal Models."},{"key":"e_1_3_2_1_4_1","unstructured":"Guo Chen Zhiqi Li Shihao Wang Jindong Jiang Yicheng Liu Lidong Lu De-An Huang Wonmin Byeon Matthieu Le Tuomas Rintamaki et al. 2025a. Eagle 2.5: Boosting long-context post-training for frontier vision-language models. arXiv preprint arXiv:2504.15271 (2025)."},{"key":"e_1_3_2_1_5_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01265"},{"key":"e_1_3_2_1_6_1","volume-title":"VersaVid-R1: A Versatile Video Understanding and Reasoning Model from Question Answering to Captioning Tasks. arXiv preprint arXiv:2506.09079","author":"Chen Xinlong","year":"2025","unstructured":"Xinlong Chen, Yuanxing Zhang, Yushuo Guan, Bohan Zeng, Yang Shi, Sihan Yang, Pengfei Wan, Qiang Liu, Liang Wang, and Tieniu Tan. 2025b. VersaVid-R1: A Versatile Video Understanding and Reasoning Model from Question Answering to Captioning Tasks. arXiv preprint arXiv:2506.09079 (2025)."},{"key":"e_1_3_2_1_7_1","volume-title":"Mixture of Decoding: An Attention-Inspired Adaptive Decoding Strategy to Mitigate Hallucinations in Large Vision-Language Models. arXiv preprint arXiv:2505.17061","author":"Chen Xinlong","year":"2025","unstructured":"Xinlong Chen, Yuanxing Zhang, Qiang Liu, Junfei Wu, Fuzheng Zhang, and Tieniu Tan. 2025c. Mixture of Decoding: An Attention-Inspired Adaptive Decoding Strategy to Mitigate Hallucinations in Large Vision-Language Models. arXiv preprint arXiv:2505.17061 (2025)."},{"key":"e_1_3_2_1_8_1","unstructured":"Zhe Chen Weiyun Wang Yue Cao Yangzhou Liu Zhangwei Gao Erfei Cui Jinguo Zhu Shenglong Ye Hao Tian Zhaoyang Liu et al. 2024b. Expanding performance boundaries of open-source multimodal models with model data and test-time scaling. arXiv preprint arXiv:2412.05271 (2024)."},{"key":"e_1_3_2_1_9_1","volume-title":"Video-ccam: Enhancing video-language understanding with causal cross-attention masks for short and long videos. arXiv preprint arXiv:2408.14023","author":"Fei Jiajun","year":"2024","unstructured":"Jiajun Fei, Dian Li, Zhidong Deng, Zekun Wang, Gang Liu, and Hui Wang. 2024. Video-ccam: Enhancing video-language understanding with causal cross-attention masks for short and long videos. arXiv preprint arXiv:2408.14023 (2024)."},{"key":"e_1_3_2_1_10_1","unstructured":"Chaoyou Fu Yuhan Dai Yondong Luo Lei Li Shuhuai Ren Renrui Zhang Zihan Wang Chenyu Zhou Yunhang Shen Mengdan Zhang et al. 2024. Video-MME: The First-Ever Comprehensive Evaluation Benchmark of Multi-modal LLMs in Video Analysis. ArXiv preprint Vol. abs\/2405.21075 (2024)."},{"key":"e_1_3_2_1_11_1","unstructured":"Chaoyou Fu Haojia Lin Xiong Wang Yi-Fan Zhang Yunhang Shen Xiaoyu Liu Haoyu Cao Zuwei Long Heting Gao Ke Li et al. 2025. Vita-1.5: Towards gpt-4o level real-time vision and speech interaction. arXiv preprint arXiv:2501.01957 (2025)."},{"key":"e_1_3_2_1_12_1","volume-title":"Unlocking multimodal understanding across millions of tokens of context. ArXiv preprint","author":"Team Gemini","year":"2024","unstructured":"Gemini Team. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. ArXiv preprint, Vol. abs\/2403.05530 (2024)."},{"key":"e_1_3_2_1_13_1","volume-title":"Any2anytryon: Leveraging adaptive position embeddings for versatile virtual clothing tasks. arXiv preprint arXiv:2501.15891","author":"Guo Hailong","year":"2025","unstructured":"Hailong Guo, Bohan Zeng, Yiren Song, Wentao Zhang, Chuang Zhang, and Jiaming Liu. 2025. Any2anytryon: Leveraging adaptive position embeddings for versatile virtual clothing tasks. arXiv preprint arXiv:2501.15891 (2025)."},{"key":"e_1_3_2_1_14_1","volume-title":"MMWorld: Towards Multi-discipline Multi-faceted World Model Evaluation in Videos. In The Thirteenth International Conference on Learning Representations.","author":"He Xuehai","unstructured":"Xuehai He, Weixi Feng, Kaizhi Zheng, Yujie Lu, Wanrong Zhu, Jiachen Li, Yue Fan, Jianfeng Wang, Linjie Li, Zhengyuan Yang, et al., [n.d.]. MMWorld: Towards Multi-discipline Multi-faceted World Model Evaluation in Videos. In The Thirteenth International Conference on Learning Representations."},{"key":"e_1_3_2_1_15_1","unstructured":"Wenyi Hong Weihan Wang Ming Ding Wenmeng Yu Qingsong Lv Yan Wang Yean Cheng Shiyu Huang Junhui Ji Zhao Xue et al. 2024. Cogvlm2: Visual language models for image and video understanding. arXiv preprint arXiv:2408.16500 (2024)."},{"key":"e_1_3_2_1_16_1","unstructured":"Aaron Hurst Adam Lerer Adam P Goucher Adam Perelman Aditya Ramesh Aidan Clark AJ Ostrow Akila Welihinda Alan Hayes Alec Radford et al. 2024. Gpt-4o system card. arXiv preprint arXiv:2410.21276 (2024)."},{"key":"e_1_3_2_1_17_1","unstructured":"Jindong Jiang Xiuyu Li Zhijian Liu Muyang Li Guo Chen Zhiqi Li De-An Huang Guilin Liu Zhiding Yu Kurt Keutzer Sungjin Ahn Jan Kautz Hongxu Yin Yao Lu Song Han and Wonmin Byeon. 2025. Token-Efficient Long Video Understanding for Multimodal LLMs. https:\/\/api.semanticscholar.org\/CorpusID:276813638"},{"key":"e_1_3_2_1_18_1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46493-0_15"},{"key":"e_1_3_2_1_19_1","doi-asserted-by":"publisher","DOI":"10.1145\/3600006.3613165"},{"key":"e_1_3_2_1_20_1","volume-title":"2024 e. LLaVA-OneVision: Easy Visual Task Transfer. ArXiv preprint","author":"Li Bo","year":"2024","unstructured":"Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. 2024 e. LLaVA-OneVision: Easy Visual Task Transfer. ArXiv preprint, Vol. abs\/2408.03326 (2024)."},{"key":"e_1_3_2_1_21_1","volume-title":"VideoChat: Chat-Centric Video Understanding. ArXiv preprint","author":"Li Kunchang","year":"2023","unstructured":"Kunchang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. 2023. VideoChat: Chat-Centric Video Understanding. ArXiv preprint, Vol. abs\/2305.06355 (2023)."},{"key":"e_1_3_2_1_22_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02095"},{"key":"e_1_3_2_1_23_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00598"},{"key":"e_1_3_2_1_24_1","volume-title":"Videochat-flash: Hierarchical compression for long-context video modeling. arXiv preprint arXiv:2501.00574","author":"Li Xinhao","year":"2024","unstructured":"Xinhao Li, Yi Wang, Jiashuo Yu, Xiangyu Zeng, Yuhan Zhu, Haian Huang, Jianfei Gao, Kunchang Li, Yinan He, Chenting Wang, et al., 2024c. Videochat-flash: Hierarchical compression for long-context video modeling. arXiv preprint arXiv:2501.00574 (2024)."},{"key":"e_1_3_2_1_25_1","volume-title":"European Conference on Computer Vision.","author":"Li Yanwei","year":"2024","unstructured":"Yanwei Li, Chengyao Wang, and Jiaya Jia. 2024a. LLaMA-VID: An Image is Worth 2 Tokens in Large Language Models. European Conference on Computer Vision."},{"key":"e_1_3_2_1_26_1","volume-title":"Video-LLaVA: Learning United Visual Representation by Alignment Before Projection. ArXiv preprint","author":"Lin Bin","year":"2023","unstructured":"Bin Lin, Bin Zhu, Yang Ye, Munan Ning, Peng Jin, and Li Yuan. 2023b. Video-LLaVA: Learning United Visual Representation by Alignment Before Projection. ArXiv preprint, Vol. abs\/2311.10122 (2023)."},{"key":"e_1_3_2_1_27_1","volume-title":"VILA: On Pre-training for Visual Language Models. arXiv:2312.07533 [cs.CV]","author":"Lin Ji","year":"2023","unstructured":"Ji Lin, Hongxu Yin, Wei Ping, Yao Lu, Pavlo Molchanov, Andrew Tao, Huizi Mao, Jan Kautz, Mohammad Shoeybi, and Song Han. 2023a. VILA: On Pre-training for Visual Language Models. arXiv:2312.07533 [cs.CV]"},{"key":"e_1_3_2_1_28_1","volume-title":"Playground v3: Improving text-to-image alignment with deep-fusion large language models. arXiv preprint arXiv:2409.10695","author":"Liu Bingchen","year":"2024","unstructured":"Bingchen Liu, Ehsan Akhgari, Alexander Visheratin, Aleks Kamko, Linmiao Xu, Shivam Shrirao, Chase Lambert, Joao Souza, Suhail Doshi, and Daiqing Li. 2024a. Playground v3: Improving text-to-image alignment with deep-fusion large language models. arXiv preprint arXiv:2409.10695 (2024)."},{"key":"e_1_3_2_1_29_1","unstructured":"Haotian Liu Chunyuan Li Yuheng Li and Yong Jae Lee. 2023a. Improved Baselines with Visual Instruction Tuning."},{"key":"e_1_3_2_1_30_1","unstructured":"Haotian Liu Chunyuan Li Yuheng Li Bo Li Yuanhan Zhang Sheng Shen and Yong Jae Lee. 2024c. LLaVA-NeXT: Improved reasoning OCR and world knowledge."},{"key":"e_1_3_2_1_31_1","volume-title":"Visual Instruction Tuning. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023","author":"Liu Haotian","year":"2023","unstructured":"Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. 2023b. Visual Instruction Tuning. In Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (Eds.)."},{"key":"e_1_3_2_1_32_1","volume-title":"Jianbin Jiao, Enhua Wu, and Jie Hu. 2024 e. Kangaroo: A Powerful Video-Language Model Supporting Long-context Video Input. arXiv preprint arXiv:2408.15542","author":"Liu Jiajun","year":"2024","unstructured":"Jiajun Liu, Yibing Wang, Hanghang Ma, Xiaoping Wu, Xiaoqi Ma, xiaoming Wei, Jianbin Jiao, Enhua Wu, and Jie Hu. 2024 e. Kangaroo: A Powerful Video-Language Model Supporting Long-context Video Input. arXiv preprint arXiv:2408.15542 (2024)."},{"key":"e_1_3_2_1_33_1","volume-title":"Attention-guided self-reflection for zero-shot hallucination detection in large language models. arXiv preprint arXiv:2501.09997","author":"Liu Qiang","year":"2025","unstructured":"Qiang Liu, Xinlong Chen, Yue Ding, Shizhen Xu, Shu Wu, and Liang Wang. 2025. Attention-guided self-reflection for zero-shot hallucination detection in large language models. arXiv preprint arXiv:2501.09997 (2025)."},{"key":"e_1_3_2_1_34_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.517"},{"key":"e_1_3_2_1_35_1","volume-title":"Oryx MLLM: On-Demand Spatial-Temporal Understanding at Arbitrary Resolution. arXiv preprint arXiv:2409.12961","author":"Liu Zuyan","year":"2024","unstructured":"Zuyan Liu, Yuhao Dong, Ziwei Liu, Winston Hu, Jiwen Lu, and Yongming Rao. 2024b. Oryx MLLM: On-Demand Spatial-Temporal Understanding at Arbitrary Resolution. arXiv preprint arXiv:2409.12961 (2024)."},{"key":"e_1_3_2_1_36_1","volume-title":"NVILA: Efficient frontier visual language models. arXiv preprint arXiv:2412.04468","author":"Liu Zhijian","year":"2024","unstructured":"Zhijian Liu, Ligeng Zhu, Baifeng Shi, Zhuoyang Zhang, Yuming Lou, Shang Yang, Haocheng Xi, Shiyi Cao, Yuxian Gu, Dacheng Li, et al., 2024 f. NVILA: Efficient frontier visual language models. arXiv preprint arXiv:2412.04468 (2024)."},{"key":"e_1_3_2_1_37_1","volume-title":"MathVista: Evaluating Math Reasoning in Visual Contexts with GPT-4V, Bard, and Other Large Multimodal Models. ArXiv preprint","author":"Lu Pan","year":"2023","unstructured":"Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. 2023. MathVista: Evaluating Math Reasoning in Visual Contexts with GPT-4V, Bard, and Other Large Multimodal Models. ArXiv preprint, Vol. abs\/2310.02255 (2023)."},{"key":"e_1_3_2_1_38_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02453"},{"key":"e_1_3_2_1_39_1","volume-title":"Video-RAG: Visually-aligned Retrieval-Augmented Long Video Comprehension. arXiv preprint arXiv:2411.13093","author":"Luo Yongdong","year":"2024","unstructured":"Yongdong Luo, Xiawu Zheng, Xiao Yang, Guilin Li, Haojia Lin, Jinfa Huang, Jiayi Ji, Fei Chao, Jiebo Luo, and Rongrong Ji. 2024. Video-RAG: Visually-aligned Retrieval-Augmented Long Video Comprehension. arXiv preprint arXiv:2411.13093 (2024)."},{"key":"e_1_3_2_1_40_1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.679"},{"key":"e_1_3_2_1_41_1","first-page":"42748","article-title":"Perception test: A diagnostic benchmark for multimodal video models","volume":"36","author":"Patraucean Viorica","year":"2023","unstructured":"Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al., 2023. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, Vol. 36 (2023), 42748-42761.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_42_1","volume-title":"Proceedings of the 38th International Conference on Machine Learning, ICML 2021","volume":"8763","author":"Radford Alec","year":"2021","unstructured":"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning Transferable Visual Models From Natural Language Supervision. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event (Proceedings of Machine Learning Research, Vol. 139), Marina Meila and Tong Zhang (Eds.). 8748-8763."},{"key":"e_1_3_2_1_43_1","volume-title":"Direct Preference Optimization: Your Language Model is Secretly a Reward Model. ArXiv","author":"Rafailov Rafael","year":"1829","unstructured":"Rafael Rafailov, Archit Sharma, Eric Mitchell, Stefano Ermon, Christopher D. Manning, and Chelsea Finn. 2023. Direct Preference Optimization: Your Language Model is Secretly a Reward Model. ArXiv, Vol. abs\/2305.18290 (2023). https:\/\/api.semanticscholar.org\/CorpusID:258959321"},{"key":"e_1_3_2_1_44_1","first-page":"1","article-title":"Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer","volume":"21","author":"Raffel Colin","year":"2020","unstructured":"Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer. Journal of Machine Learning Research, Vol. 21, 140 (2020), 1-67. http:\/\/jmlr.org\/papers\/v21\/20-074.html","journal-title":"Journal of Machine Learning Research"},{"key":"e_1_3_2_1_45_1","volume-title":"LAION-400M: Open Dataset of CLIP-Filtered 400 Million Image-Text Pairs. ArXiv preprint","author":"Schuhmann Christoph","year":"2021","unstructured":"Christoph Schuhmann, Richard Vencu, Romain Beaumont, Robert Kaczmarczyk, Clayton Mullis, Aarush Katta, Theo Coombes, Jenia Jitsev, and Aran Komatsuzaki. 2021. LAION-400M: Open Dataset of CLIP-Filtered 400 Million Image-Text Pairs. ArXiv preprint, Vol. abs\/2111.02114 (2021)."},{"key":"e_1_3_2_1_46_1","volume-title":"Slow-Fast Architecture for Video Multi-Modal Large Language Models. arXiv preprint arXiv:2504.01328","author":"Shi Min","year":"2025","unstructured":"Min Shi, Shihao Wang, Chieh-Yun Chen, Jitesh Jain, Kai Wang, Junjun Xiong, Guilin Liu, Zhiding Yu, and Humphrey Shi. 2025a. Slow-Fast Architecture for Video Multi-Modal Large Language Models. arXiv preprint arXiv:2504.01328 (2025)."},{"key":"e_1_3_2_1_47_1","unstructured":"Yang Shi Huanqian Wang Wulin Xie Huanyao Zhang Lijie Zhao Yi-Fan Zhang Xinfeng Li Chaoyou Fu Zhuoer Wen Wenting Liu et al. 2025b. MME-VideoOCR: Evaluating OCR-Based Capabilities of Multimodal LLMs in Video Scenarios. arXiv preprint arXiv:2505.21333 (2025)."},{"key":"e_1_3_2_1_48_1","volume-title":"Video-XL: Extra-Long Vision Language Model for Hour-Scale Video Understanding. arXiv preprint arXiv:2409.14485","author":"Shu Yan","year":"2024","unstructured":"Yan Shu, Peitian Zhang, Zheng Liu, Minghao Qin, Junjie Zhou, Tiejun Huang, and Bo Zhao. 2024. Video-XL: Extra-Long Vision Language Model for Hour-Scale Video Understanding. arXiv preprint arXiv:2409.14485 (2024)."},{"key":"e_1_3_2_1_49_1","doi-asserted-by":"crossref","unstructured":"Enxin Song Wenhao Chai Guanhong Wang Yucheng Zhang Haoyang Zhou Feiyang Wu Xun Guo Tian Ye Yan Lu Jenq-Neng Hwang et al. 2023. MovieChat: From Dense Token to Sparse Memory for Long Video Understanding. arXiv preprint arXiv:2307.16449 (2023).","DOI":"10.1109\/CVPR52733.2024.01725"},{"key":"e_1_3_2_1_50_1","volume-title":"Hanieh Deilamsalehy, Bryan A. Plummer, Bryan Russell, and Kate Saenko.","author":"Tan Reuben","year":"2024","unstructured":"Reuben Tan, Ximeng Sun, Ping Hu, Jui hsien Wang, Hanieh Deilamsalehy, Bryan A. Plummer, Bryan Russell, and Kate Saenko. 2024. Koala: Key frame-conditioned long video-LLM. (2024)."},{"key":"e_1_3_2_1_51_1","doi-asserted-by":"publisher","DOI":"10.1145\/3534678.3539335"},{"key":"e_1_3_2_1_52_1","volume-title":"Tarsier: Recipes for training and evaluating large video description models. arXiv preprint arXiv:2407.00634","author":"Wang Jiawei","year":"2024","unstructured":"Jiawei Wang, Liping Yuan, Yuchen Zhang, and Haomiao Sun. 2024d. Tarsier: Recipes for training and evaluating large video description models. arXiv preprint arXiv:2407.00634 (2024)."},{"key":"e_1_3_2_1_53_1","volume-title":"Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution. arXiv preprint arXiv:2409.12191","author":"Wang Peng","year":"2024","unstructured":"Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. 2024a. Qwen2-VL: Enhancing Vision-Language Model's Perception of the World at Any Resolution. arXiv preprint arXiv:2409.12191 (2024)."},{"key":"e_1_3_2_1_54_1","first-page":"121475","article-title":"Cogvlm: Visual expert for pretrained language models","volume":"37","author":"Wang Weihan","year":"2024","unstructured":"Weihan Wang, Qingsong Lv, Wenmeng Yu, Wenyi Hong, Ji Qi, Yan Wang, Junhui Ji, Zhuoyi Yang, Lei Zhao, Song XiXuan, et al., 2024b. Cogvlm: Visual expert for pretrained language models. Advances in Neural Information Processing Systems, Vol. 37 (2024), 121475-121499.","journal-title":"Advances in Neural Information Processing Systems"},{"key":"e_1_3_2_1_55_1","volume-title":"LongLLaVA: Scaling Multi-modal LLMs to 1000 Images Efficiently via a Hybrid Architecture. arXiv preprint arXiv:2409.02889","author":"Wang Xidong","year":"2024","unstructured":"Xidong Wang, Dingjie Song, Shunian Chen, Chen Zhang, and Benyou Wang. 2024c. LongLLaVA: Scaling Multi-modal LLMs to 1000 Images Efficiently via a Hybrid Architecture. arXiv preprint arXiv:2409.02889 (2024)."},{"key":"e_1_3_2_1_56_1","unstructured":"Yi Wang Xinhao Li Ziang Yan Yinan He Jiashuo Yu Xiangyu Zeng Chenting Wang Changlian Ma Haian Huang Jianfei Gao et al. 2025. InternVideo2. 5: Empowering Video MLLMs with Long and Rich Context Modeling. arXiv preprint arXiv:2501.12386 (2025)."},{"key":"e_1_3_2_1_57_1","volume-title":"European Conference on Computer Vision. https:\/\/api.semanticscholar.org\/CorpusID:268889590","author":"Weng Yuetian","year":"2024","unstructured":"Yuetian Weng, Mingfei Han, Haoyu He, Xiaojun Chang, and Bohan Zhuang. 2024. LongVLM: Efficient Long Video Understanding via Large Language Models. In European Conference on Computer Vision. https:\/\/api.semanticscholar.org\/CorpusID:268889590"},{"key":"e_1_3_2_1_58_1","unstructured":"Zhiyu Wu Xiaokang Chen Zizheng Pan Xingchao Liu Wen Liu Damai Dai Huazuo Gao Yiyang Ma Chengyue Wu Bingxuan Wang et al. 2024. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302 (2024)."},{"key":"e_1_3_2_1_59_1","volume-title":"Longvila: Scaling long-context visual language models for long videos. arXiv preprint arXiv:2408.10188","author":"Xue Fuzhao","year":"2024","unstructured":"Fuzhao Xue, Yukang Chen, Dacheng Li, Qinghao Hu, Ligeng Zhu, Xiuyu Li, Yunhao Fang, Haotian Tang, Shang Yang, Zhijian Liu, et al., 2024. Longvila: Scaling long-context visual language models for long videos. arXiv preprint arXiv:2408.10188 (2024)."},{"key":"e_1_3_2_1_60_1","unstructured":"Yuan Yao Tianyu Yu Ao Zhang Chongyi Wang Junbo Cui Hongji Zhu Tianchi Cai Haoyu Li Weilin Zhao Zhihui He Qianyu Chen Huarong Zhou Zhensheng Zou Haoye Zhang Shengding Hu Zhi Zheng Jie Zhou Jie Cai Xu Han Guoyang Zeng Dahai Li Zhiyuan Liu and Maosong Sun. 2024. MiniCPM-V: A GPT-4V Level MLLM on Your Phone. arXiv:2408.01800 [cs.CV] https:\/\/arxiv.org\/abs\/2408.01800"},{"key":"e_1_3_2_1_61_1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00913"},{"key":"e_1_3_2_1_62_1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01100"},{"key":"e_1_3_2_1_63_1","unstructured":"Boqiang Zhang Kehan Li Zesen Cheng Zhiqiang Hu Yuqian Yuan Guanzheng Chen Sicong Leng Yuming Jiang Hang Zhang Xin Li et al. 2025a. VideoLLaMA 3: Frontier Multimodal Foundation Models for Image and Video Understanding. arXiv preprint arXiv:2501.13106 (2025)."},{"key":"e_1_3_2_1_64_1","volume-title":"Eventhallusion: Diagnosing event hallucinations in video llms. arXiv preprint arXiv:2409.16597","author":"Zhang Jiacheng","year":"2024","unstructured":"Jiacheng Zhang, Yang Jiao, Shaoxiang Chen, Na Zhao, and Jingjing Chen. 2024a. Eventhallusion: Diagnosing event hallucinations in video llms. arXiv preprint arXiv:2409.16597 (2024)."},{"key":"e_1_3_2_1_65_1","volume-title":"Vinoground: Scrutinizing LMMs over Dense Temporal Reasoning with Short Videos. arXiv preprint arXiv:2410.02763","author":"Zhang Jianrui","year":"2024","unstructured":"Jianrui Zhang, Cai Mu, and Yong Jae Lee. 2024b. Vinoground: Scrutinizing LMMs over Dense Temporal Reasoning with Short Videos. arXiv preprint arXiv:2410.02763 (2024)."},{"key":"e_1_3_2_1_66_1","volume-title":"2024 f. Long Context Transfer from Language to Vision. ArXiv","author":"Zhang Peiyuan","year":"2024","unstructured":"Peiyuan Zhang, Kaichen Zhang, Bo Li, Guangtao Zeng, Jingkang Yang, Yuanhan Zhang, Ziyue Wang, Haoran Tan, Chunyuan Li, and Ziwei Liu. 2024 f. Long Context Transfer from Language to Vision. ArXiv, Vol. abs\/2406.16852 (2024). https:\/\/api.semanticscholar.org\/CorpusID:270703489"},{"key":"e_1_3_2_1_67_1","volume-title":"Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713","author":"Zhang Yuanhan","year":"2024","unstructured":"Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. 2024d. Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713 (2024)."},{"key":"e_1_3_2_1_68_1","volume-title":"Beyond llava-hd: Diving into high-resolution large multimodal models. arXiv preprint arXiv:2406.08487","author":"Zhang Yi-Fan","year":"2024","unstructured":"Yi-Fan Zhang, Qingsong Wen, Chaoyou Fu, Xue Wang, Zhang Zhang, Liang Wang, and Rong Jin. 2024c. Beyond llava-hd: Diving into high-resolution large multimodal models. arXiv preprint arXiv:2406.08487 (2024)."},{"key":"e_1_3_2_1_69_1","volume-title":"Mm-rlhf: The next step forward in multimodal llm alignment. arXiv preprint arXiv:2502.10391","author":"Zhang Yi-Fan","year":"2025","unstructured":"Yi-Fan Zhang, Tao Yu, Haochen Tian, Chaoyou Fu, Peiyan Li, Jianshu Zeng, Wulin Xie, Yang Shi, Huanyu Zhang, Junkang Wu, et al., 2025b. Mm-rlhf: The next step forward in multimodal llm alignment. arXiv preprint arXiv:2502.10391 (2025)."},{"key":"e_1_3_2_1_70_1","volume-title":"2024 e. Debiasing multimodal large language models. arXiv preprint arXiv:2403.05262","author":"Zhang Yi-Fan","year":"2024","unstructured":"Yi-Fan Zhang, Weichen Yu, Qingsong Wen, Xue Wang, Zhang Zhang, Liang Wang, Rong Jin, and Tieniu Tan. 2024 e. Debiasing multimodal large language models. arXiv preprint arXiv:2403.05262 (2024)."},{"key":"e_1_3_2_1_71_1","volume-title":"MLVU: A Comprehensive Benchmark for Multi-Task Long Video Understanding. ArXiv preprint","author":"Zhou Junjie","year":"2024","unstructured":"Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. 2024. MLVU: A Comprehensive Benchmark for Multi-Task Long Video Understanding. ArXiv preprint, Vol. abs\/2406.04264 (2024)."},{"key":"e_1_3_2_1_72_1","unstructured":"Jinguo Zhu Weiyun Wang Zhe Chen Zhaoyang Liu Shenglong Ye Lixin Gu Hao Tian Yuchen Duan Weijie Su Jie Shao et al. 2025. Internvl3: Exploring advanced training and test-time recipes for open-source multimodal models. arXiv preprint arXiv:2504.10479 (2025)."}],"event":{"name":"MM '25: The 33rd ACM International Conference on Multimedia","location":"Dublin Ireland","acronym":"MM '25","sponsor":["SIGMM ACM Special Interest Group on Multimedia"]},"container-title":["Proceedings of the 33rd ACM International Conference on Multimedia"],"original-title":[],"link":[{"URL":"https:\/\/dl.acm.org\/doi\/pdf\/10.1145\/3746027.3755363","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,12,10]],"date-time":"2025-12-10T03:59:49Z","timestamp":1765339189000},"score":1,"resource":{"primary":{"URL":"https:\/\/dl.acm.org\/doi\/10.1145\/3746027.3755363"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,27]]},"references-count":72,"alternative-id":["10.1145\/3746027.3755363","10.1145\/3746027"],"URL":"https:\/\/doi.org\/10.1145\/3746027.3755363","relation":{},"subject":[],"published":{"date-parts":[[2025,10,27]]},"assertion":[{"value":"2025-10-27","order":3,"name":"published","label":"Published","group":{"name":"publication_history","label":"Publication History"}}]}}