{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,4]],"date-time":"2026-05-04T09:58:37Z","timestamp":1777888717976,"version":"3.51.4"},"reference-count":94,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,10,19]],"date-time":"2025-10-19T00:00:00Z","timestamp":1760832000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key R&D Program of China","doi-asserted-by":"publisher","award":["2022ZD0160101"],"award-info":[{"award-number":["2022ZD0160101"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,10,19]]},"DOI":"10.1109\/iccv51701.2025.02011","type":"proceedings-article","created":{"date-parts":[[2026,4,29]],"date-time":"2026-04-29T19:45:49Z","timestamp":1777491949000},"page":"21655-21666","source":"Crossref","is-referenced-by-count":0,"title":["VRBench: A Benchmark for Multi-Step Reasoning in Long Narrative Videos"],"prefix":"10.1109","author":[{"given":"Jiashuo","family":"Yu","sequence":"first","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Yue","family":"Wu","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Meng","family":"Chu","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Zhifei","family":"Ren","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Zizheng","family":"Huang","sequence":"additional","affiliation":[{"name":"Nanjing University"}]},{"given":"Pei","family":"Chu","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Ruijie","family":"Zhang","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Yinan","family":"He","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Qirui","family":"Li","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Songze","family":"Li","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Zhenxiang","family":"Li","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Zhongying","family":"Tu","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Conghui","family":"He","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Yu","family":"Qiao","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Yali","family":"Wang","sequence":"additional","affiliation":[{"name":"Shenzhen Institute of Advanced Technology,Chinese Academy of Sciences"}]},{"given":"Yi","family":"Wang","sequence":"additional","affiliation":[{"name":"Shanghai Artificial Intelligence Laboratory"}]},{"given":"Limin","family":"Wang","sequence":"additional","affiliation":[{"name":"Nanjing 
University"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Phi-3 technical report: A highly capable language model locally on your phone","author":"Abdin","year":"2024","journal-title":"arXiv preprint"},{"key":"ref2","article-title":"Qwen2.5-vl technical report","author":"Bai","year":"2025","journal-title":"arXiv preprint"},{"key":"ref3","article-title":"Internlm2 technical report","author":"Cai","year":"2024","journal-title":"arXiv preprint"},{"key":"ref4","article-title":"Cgbench: Clue-grounded question answering benchmark for long video understanding","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-main.489"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73113-6_11"},{"key":"ref7","article-title":"Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling","author":"Chen","year":"2024","journal-title":"arXiv preprint"},{"key":"ref8","article-title":"Training verifiers to solve math word problems","author":"Cobbe","year":"2021","journal-title":"arXiv preprint"},{"key":"ref9","article-title":"Tvbench: Redesigning video-language evaluation","author":"Cores","year":"2024","journal-title":"arXiv preprint"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.420"},{"key":"ref11","volume-title":"Deepl translate: The world\u2019s most accurate translator","year":"2025"},{"key":"ref12","volume-title":"Gemini 2.0 flash thinking","year":"2025"},{"key":"ref13","first-page":"89098","article-title":"Mmbench-video: A long-form multi-shot benchmark for holistic video understanding","volume":"37","author":"Fang","year":"2025","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref14","article-title":"Video-of-thought: Step-by-step video reasoning from perception to cognition","author":"Fei","year":"2024","journal-title":"arXiv preprint"},{"key":"ref15","article-title":"Sciknoweval: Evaluating multilevel scientific knowledge of large language models","author":"Feng","year":"2024","journal-title":"arXiv preprint"},{"key":"ref16","article-title":"Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal LLMs in video analysis","author":"Fu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref17","article-title":"H2ovl-mississippi vision language models technical report","author":"Galib","year":"2024","journal-title":"arXiv preprint"},{"key":"ref18","article-title":"Deepseek-r1: Incentivizing reasoning capability in LLMs via reinforcement learning","author":"Guo","year":"2025","journal-title":"arXiv preprint"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.02438"},{"key":"ref20","article-title":"Mmworld: Towards multi-discipline multi-faceted world model evaluation in videos","author":"He","year":"2024","journal-title":"arXiv preprint"},{"key":"ref21","article-title":"Measuring massive multitask language understanding","author":"Hendrycks","year":"2020","journal-title":"arXiv preprint"},{"key":"ref22","article-title":"Video-mmmu: Evaluating knowledge acquisition from multi-discipline professional videos","author":"Hu","year":"2025","journal-title":"arXiv preprint"},
{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.52202\/075280-2749"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02060"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.52202\/079017-0607"},{"key":"ref26","article-title":"Livecodebench: Holistic and contamination free evaluation of large language models for code","author":"Jain","year":"2024","journal-title":"arXiv preprint"},{"key":"ref27","article-title":"Visscience: An extensive benchmark for evaluating k12 educational multi-modal scientific reasoning","author":"Jiang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/d18-1167"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.01276"},{"key":"ref30","article-title":"Aria: An open multimodal native mixture-of-experts model","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02095"},{"key":"ref32","article-title":"Videochat-flash: Hierarchical compression for long-context video modeling","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"key":"ref33","article-title":"Videochat-r1: Enhancing spatio-temporal perception via reinforcement fine-tuning","author":"Li","year":"2025","journal-title":"arXiv preprint"},{"key":"ref34","article-title":"Videovista: A versatile benchmark for video understanding and reasoning","author":"Li","year":"2024","journal-title":"arXiv preprint"},{"key":"ref35","article-title":"Mmsci: A multimodal multi-discipline dataset for phd-level scientific comprehension","author":"Li","year":"2024","journal-title":"AI for Accelerated Materials Design-Vienna"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-short.11"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02520"},{"key":"ref38","article-title":"Deepseek-v3 technical report","author":"Liu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-acl.517"},{"key":"ref40","article-title":"Et bench: Towards open-ended event-level video-language understanding","author":"Liu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref41","volume-title":"Llama-3.3. Llama-3.3-70b-instruct","year":"2025"},
{"key":"ref42","article-title":"Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts","author":"Lu","year":"2023","journal-title":"arXiv preprint"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.52202\/075280-2004"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.findings-acl.177"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/WACV45572.2020.9093523"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.26599\/cvm.2025.9450516"},{"key":"ref47","volume-title":"Hello gpt-4o","year":"2024"},{"key":"ref48","volume-title":"Introducing openai o1","year":"2024"},{"key":"ref49","volume-title":"Openai o3-mini","year":"2025"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.52202\/075280-1852"},{"key":"ref51","first-page":"28492","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"International conference on machine learning","author":"Radford","year":"2023"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.52202\/075280-2338"},{"key":"ref53","article-title":"Cinepile: A long video question answering dataset and benchmark","author":"Rawal","year":"2024","journal-title":"arXiv preprint"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1007\/s00799-022-00329-y"},{"key":"ref55","article-title":"Proximal policy optimization algorithms","author":"Schulman","year":"2017","journal-title":"arXiv preprint"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01725"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i17.29872"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.501"},{"key":"ref59","author":"Claude Team","volume-title":"Claude 3.7 sonnet","year":"2025"},{"key":"ref60","volume-title":"Mimo-vl technical report","author":"Team","year":"2025"},{"key":"ref61","article-title":"Gemini: a family of highly capable multimodal models","author":"Anil","year":"2023","journal-title":"arXiv preprint"},{"key":"ref62","article-title":"Kimi-vl technical report","author":"Team","year":"2025","journal-title":"arXiv preprint"},{"key":"ref63","article-title":"Kwai keye-vl technical report","author":"Keye Team","year":"2025","journal-title":"arXiv preprint"},{"key":"ref64","author":"Qwen Team","volume-title":"Qwq: Reflect deeply on the boundaries of the unknown","year":"2024"},{"key":"ref65","author":"Qwen Team","year":"2025","journal-title":"Qwq-32b: Embracing the power of reinforcement learning"},{"key":"ref66","article-title":"Llamav-o1: Rethinking step-by-step visual reasoning in LLMs","author":"Thawakar","year":"2025","journal-title":"arXiv preprint"},{"key":"ref67","first-page":"95095","article-title":"Measuring multimodal mathematical reasoning with math-vision dataset","volume":"37","author":"Wang","year":"2025","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref68","article-title":"Qwen2-vl: Enhancing vision-language model\u2019s perception of the world at any resolution","author":"Wang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref69","article-title":"Enhancing the reasoning ability of multimodal large language models via mixed preference optimization","author":"Wang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref70","article-title":"Lvbench: An extreme long video understanding benchmark","author":"Wang","year":"2024","journal-title":"arXiv preprint"},
{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73013-9_23"},{"key":"ref72","article-title":"Mmlu-pro: A more robust and challenging multi-task language understanding benchmark","volume-title":"The Thirty-eighth Conference on Neural Information Processing Systems Datasets and Benchmarks Track","author":"Wang","year":"2024"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73013-9_23"},{"key":"ref74","first-page":"113569","article-title":"Charxiv: Charting gaps in realistic chart understanding in multimodal LLMs","volume":"37","author":"Wang","year":"2025","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.52202\/068431-1800"},{"key":"ref76","article-title":"Star: A benchmark for situated reasoning in real-world videos","author":"Wu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref77","first-page":"28828","article-title":"Longvideobench: A benchmark for long-context interleaved video-language understanding","volume":"37","author":"Wu","year":"2025","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.findings-emnlp.710"},{"key":"ref79","article-title":"Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding","author":"Wu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00965"},{"key":"ref81","article-title":"Llava-o1: Let vision language models reason step-by-step","author":"Xu","year":"2024","journal-title":"arXiv preprint"},{"key":"ref82","volume-title":"Qwen2 technical report","author":"Yang","year":"2024"},{"key":"ref83","article-title":"Qwen2.5 technical report","author":"Yang","year":"2024","journal-title":"arXiv preprint"},
{"key":"ref84","first-page":"57240","article-title":"Vript: A video is worth thousands of words","volume":"37","author":"Yang","year":"2025","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.00913"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.acl-long.736"},{"key":"ref87","article-title":"Mr-gsm8k: A meta-reasoning benchmark for large language model evaluation","author":"Zeng","year":"2023","journal-title":"arXiv preprint"},{"key":"ref88","article-title":"Videollama 3: Frontier multimodal foundation models for image and video understanding","author":"Zhang","year":"2025","journal-title":"arXiv preprint"},{"key":"ref89","article-title":"Cmmmu: A Chinese massive multi-discipline multimodal understanding benchmark","author":"Zhang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref90","article-title":"Movqa: A benchmark of versatile question-answering for long-form movie understanding","author":"Zhang","year":"2023","journal-title":"arXiv preprint"},{"key":"ref91","article-title":"Long context transfer from language to vision","author":"Zhang","year":"2024","journal-title":"arXiv preprint"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.00793"},{"key":"ref93","article-title":"Mlvu: A comprehensive benchmark for multi-task long video understanding","author":"Zhou","year":"2024","journal-title":"arXiv preprint"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW59228.2023.00218"}],"event":{"name":"2025 IEEE\/CVF International Conference on Computer Vision (ICCV)","location":"Honolulu, HI, USA","start":{"date-parts":[[2025,10,19]]},"end":{"date-parts":[[2025,10,25]]}},"container-title":["2025 IEEE\/CVF International Conference on Computer Vision (ICCV)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11443115\/11443287\/11444919.pdf?arnumber=11444919","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T04:55:06Z","timestamp":1777611306000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11444919\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,10,19]]},"references-count":94,"URL":"https:\/\/doi.org\/10.1109\/iccv51701.2025.02011","relation":{},"subject":[],"published":{"date-parts":[[2025,10,19]]}}}
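Note: the object above is a Crossref work record ("message-type":"work") for the VRBench ICCV 2025 paper. Below is a minimal sketch of how such a record could be retrieved and inspected, assuming it comes from the public Crossref REST API works endpoint and using the third-party requests package; the endpoint choice and the three-entry reference preview are illustrative assumptions, not part of the record itself.

# Minimal sketch: fetch this Crossref work record and read a few fields.
# Assumption: the record above was served by https://api.crossref.org/works/{DOI}.
import requests

DOI = "10.1109/iccv51701.2025.02011"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()
work = resp.json()["message"]  # the "message" object shown above

print(work["title"][0])        # paper title
print(len(work.get("author", [])), "authors;",
      work.get("references-count"), "references")
# Preview the first few reference entries (key plus title or DOI).
for ref in work.get("reference", [])[:3]:
    print(ref["key"], ref.get("article-title") or ref.get("DOI"))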