{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,6]],"date-time":"2026-04-06T20:52:42Z","timestamp":1775508762045,"version":"3.50.1"},"reference-count":58,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"5","license":[{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T00:00:00Z","timestamp":1777593600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"National Key R&D Program of China","award":["2024YFE0211000"],"award-info":[{"award-number":["2024YFE0211000"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62402341"],"award-info":[{"award-number":["62402341"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62471287"],"award-info":[{"award-number":["62471287"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Shanghai Municipal Science and Technology Major Project","award":["2021SHZDZX0102"],"award-info":[{"award-number":["2021SHZDZX0102"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2026,5]]},"DOI":"10.1109\/tpami.2026.3650864","type":"journal-article","created":{"date-parts":[[2026,1,12]],"date-time":"2026-01-12T22:00:30Z","timestamp":1768255230000},"page":"5586-5603","source":"Crossref","is-referenced-by-count":0,"title":["Parse, Align and Aggregate: Graph-Driven Compositional Reasoning for Video Question Answering"],"prefix":"10.1109","volume":"48","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3873-4053","authenticated-orcid":false,"given":"Jiangtong","family":"Li","sequence":"first","affiliation":[{"name":"School of Computer Science and Technology, Tongji University, Shanghai, China"}]},{"given":"Zhaohe","family":"Liao","sequence":"additional","affiliation":[{"name":"School of Computer Science, Shanghai Jiao Tong University, Shanghai, China"}]},{"given":"Fengshun","family":"Xiao","sequence":"additional","affiliation":[{"name":"BiliBili Inc, Shanghai, China"}]},{"given":"Tianjiao","family":"Li","sequence":"additional","affiliation":[{"name":"BiliBili Inc, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0005-7590-3567","authenticated-orcid":false,"given":"Qiang","family":"Zhang","sequence":"additional","affiliation":[{"name":"BiliBili Inc, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2414-4362","authenticated-orcid":false,"given":"Haohua","family":"Zhao","sequence":"additional","affiliation":[{"name":"School of Computer Science, Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1970-8634","authenticated-orcid":false,"given":"Li","family":"Niu","sequence":"additional","affiliation":[{"name":"School of Computer Science, Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7416-592X","authenticated-orcid":false,"given":"Guang","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Tongji University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7597-8503","authenticated-orcid":false,"given":"Liqing","family":"Zhang","sequence":"additional","affiliation":[{"name":"School of Computer Science, Shanghai Jiao Tong University, Shanghai, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0637-9317","authenticated-orcid":false,"given":"Changjun","family":"Jiang","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Tongji University, Shanghai, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"key":"ref2","article-title":"VideoLLaMA 2: Advancing spatial-temporal modeling and audio understanding in video-LLMs","author":"Cheng","year":"2024"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.342"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00965"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.02059"},{"key":"ref6","article-title":"STAR: A benchmark for situated reasoning in real-world videos","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Wu","year":"2021"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01099"},{"key":"ref8","first-page":"46212","article-title":"EgoSchema: A diagnostic benchmark for very long-form video language understanding","volume-title":"Proc. 37th Conf. Neural Inf. Process. Syst.","author":"Mangalam","year":"2023"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02095"},{"key":"ref10","article-title":"Weak-shot keypoint estimation via keyness and correspondence transfer","volume-title":"Proc. Conf. Neural Inf. Process. Syst.","author":"Chen","year":"2025"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00499"},{"key":"ref12","first-page":"13109","article-title":"Video-of-thought: Step-by-step video reasoning from perception to cognition","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Fei","year":"2024"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.679"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00294"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01272"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i2.16203"},{"key":"ref17","first-page":"11633","article-title":"CLEVRER: Collision events for video representation and reasoning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Yi","year":"2020"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02221"},{"key":"ref19","first-page":"57116","article-title":"MMT-Bench: A comprehensive multimodal benchmark for evaluating large vision-language models towards multitask AGI","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Ying","year":"2024"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01113"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01025"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33016391"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00210"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6767"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-short.122"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i3.20184"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20059-5_3"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3292266"},{"key":"ref29","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"key":"ref30","article-title":"The LLaMA 3 herd of models","author":"Dubey","year":"2024"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3393452"},{"key":"ref32","article-title":"LLaVA-NeXT-interleave: Tackling multi-image, video, and 3D in large multimodal models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Li","year":"2025"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00757"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00686"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548061"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.12"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548035"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3303451"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3284038"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00261"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3581783.3611873"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.02243"},{"key":"ref43","first-page":"12513","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hu","year":"2022"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.149"},{"key":"ref46","article-title":"Qwen2-VL: Enhancing vision-language model\u2019s perception of the world at any resolution","author":"Wang","year":"2024"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72952-2_19"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01300"},{"key":"ref49","article-title":"VideoLLaMA 3: Frontier multimodal foundation models for image and video understanding","author":"Zhang","year":"2025"},{"key":"ref50","article-title":"LLaVA-OneVision: Easy visual task transfer","author":"Li","year":"2025","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33019127"},{"key":"ref52","article-title":"MiniGPT4-Video: Advancing multimodal LLMs for video understanding with interleaved visual-textual tokens","author":"Ataallah","year":"2024"},{"key":"ref53","first-page":"14316","article-title":"LLaMA-Adapter: Efficient fine-tuning of large language models with zero-initialized attention","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Zhang","year":"2024"},{"key":"ref54","first-page":"22185","article-title":"Video-LaVIT: Unified video-language pre-training with decoupled visual-motional tokenization","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jin","year":"2024"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.00312"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/s11432-024-4321-9"},{"key":"ref57","first-page":"4061","article-title":"Decoupled weight decay regularization","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Loshchilov","year":"2019"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01735"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/11474534\/11329152.pdf?arnumber=11329152","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,4,6]],"date-time":"2026-04-06T19:56:13Z","timestamp":1775505373000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11329152\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,5]]},"references-count":58,"journal-issue":{"issue":"5"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2026.3650864","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,5]]}}}