{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,17]],"date-time":"2026-03-17T00:53:51Z","timestamp":1773708831875,"version":"3.50.1"},"reference-count":169,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2026,2,1]],"date-time":"2026-02-01T00:00:00Z","timestamp":1769904000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62376069"],"award-info":[{"award-number":["62376069"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276004"],"award-info":[{"award-number":["62276004"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62376140"],"award-info":[{"award-number":["62376140"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004826","name":"Beijing Natural Science Foundation","doi-asserted-by":"publisher","award":["L257007"],"award-info":[{"award-number":["L257007"]}],"id":[{"id":"10.13039\/501100004826","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Young Elite Scientists Sponsorship Program by CAST","award":["2023QNRC001"],"award-info":[{"award-number":["2023QNRC001"]}]},{"DOI":"10.13039\/501100021171","name":"Basic and Applied Basic Research Foundation of Guangdong Province","doi-asserted-by":"publisher","award":["2024A1515012027"],"award-info":[{"award-number":["2024A1515012027"]}],"id":[{"id":"10.13039\/501100021171","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Shenzhen Science and Technology Program","award":["KQTD20240729102207002"],"award-info":[{"award-number":["KQTD20240729102207002"]}]},{"name":"Shenzhen Science and Technology Program","award":["ZDSYS20230626091203008"],"award-info":[{"award-number":["ZDSYS20230626091203008"]}]},{"name":"Jiangsu Science and Technology Major Program","award":["BG2024041"],"award-info":[{"award-number":["BG2024041"]}]},{"name":"Special Fund for Taishan Scholar Project of Shandong Province"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. 
Intell."],"published-print":{"date-parts":[[2026,2]]},"DOI":"10.1109\/tpami.2025.3615586","type":"journal-article","created":{"date-parts":[[2025,9,29]],"date-time":"2025-09-29T17:53:10Z","timestamp":1759168390000},"page":"1521-1541","source":"Crossref","is-referenced-by-count":2,"title":["A Survey on Video Temporal Grounding With Multimodal Large Language Model"],"prefix":"10.1109","volume":"48","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0247-5221","authenticated-orcid":false,"given":"Jianlong","family":"Wu","sequence":"first","affiliation":[{"name":"School of Computer Science and Technology, Harbin Institute of Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0007-3685-5747","authenticated-orcid":false,"given":"Wei","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Harbin Institute of Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9597-0525","authenticated-orcid":false,"given":"Ye","family":"Liu","sequence":"additional","affiliation":[{"name":"Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1582-5764","authenticated-orcid":false,"given":"Meng","family":"Liu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Shandong Jianzhu University, Jinan, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1476-0273","authenticated-orcid":false,"given":"Liqiang","family":"Nie","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Harbin Institute of Technology, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1493-7569","authenticated-orcid":false,"given":"Zhouchen","family":"Lin","sequence":"additional","affiliation":[{"name":"State Key Lab of General AI, School of Intelligence Science and Technology, Peking University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6720-234X","authenticated-orcid":false,"given":"Chang Wen","family":"Chen","sequence":"additional","affiliation":[{"name":"Department of Computing, The Hong Kong Polytechnic University, Hong Kong SAR, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"InternVideo: General video foundation models via generative and discriminative 
learning","author":"Wang","year":"2022"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.679"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2023.3331841"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00207"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.618"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.83"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00677"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299154"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00135"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01254"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.52202\/079017-0900"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00134"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02205"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58604-1_10"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01248"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00305"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33019159"},{"key":"ref18","article-title":"OPT: Open pre-trained transformer language models","author":"Zhang","year":"2022"},{"issue":"70","key":"ref19","first-page":"1","article-title":"Scaling instruction-finetuned language models","volume":"25","author":"Chung","year":"2024","journal-title":"J. Mach. Learn. Res."},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1038\/s41586-025-09422-z"},{"key":"ref21","first-page":"34892","article-title":"Visual instruction tuning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.emnlp-demo.49"},{"key":"ref23","article-title":"PLLaVA: Parameter-free LLaVA extension from images to videos for video dense captioning","author":"Xu","year":"2024"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01353"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01357"},{"key":"ref26","first-page":"41340","article-title":"Momentor: Advancing video large language model with fine-grained temporal reasoning","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Qian"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref28","article-title":"EVA-clip: Improved training techniques for clip at scale","author":"Sun","year":"2023"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW63382.2024.00191"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3726302.3729945"},{"key":"ref31","first-page":"1","article-title":"Trace: Temporal grounding video LLM via causal event modeling","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Guo"},{"key":"ref32","article-title":"Hawkeye: Training video-text LLMS for grounding text in videos","author":"Wang","year":"2024"},{"key":"ref33","first-page":"1","article-title":"Timesuite: Improving MLLMs for long video understanding via grounded tuning","volume-title":"Proc. Int. Conf. Learn. 
Representations","author":"Zeng"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-emnlp.50"},{"key":"ref35","article-title":"VideoMind: A chain-of-loRA agent for long video reasoning","author":"Liu","year":"2025"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1561\/0600000110"},{"key":"ref37","article-title":"A comprehensive study of deep video action recognition","author":"Zhu","year":"2020"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/3696415"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2024.3369699"},{"key":"ref40","article-title":"A survey on natural language video localization","author":"Liu","year":"2021"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3556537"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCST50977.2020.00123"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1145\/3532626"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2023.3258628"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6984"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00262"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01108"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i4.16406"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240549"},{"key":"ref50","first-page":"32076","article-title":"ET bench: Towards open-ended event-level video-language understanding","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Liu"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00911"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00487"},{"key":"ref53","first-page":"11846","article-title":"Detecting moments and highlights in videos via natural language queries","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Lei"},{"key":"ref54","article-title":"CG-bench: Clue-grounded question answering benchmark for long video understanding","author":"Chen","year":"2024"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72652-1_6"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-022-01600-0"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3680774"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01263"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58548-8_20"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413841"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3052086"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3058614"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.732"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6897"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.342"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1007\/s11432-024-4321-9"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"ref68","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref69","first-page":"35946","article-title":"Masked autoencoders as spatiotemporal learners","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Feichtenhofer"},{"key":"ref70","first-page":"10078","article-title":"Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Tong"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01398"},{"key":"ref72","first-page":"49250","article-title":"InstructBLIP: Towards general-purpose vision-language models with instruction tuning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Dai"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.00799"},{"key":"ref74","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Alayrac"},{"key":"ref75","first-page":"19730","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref76","article-title":"mPLUG-OWL: Modularization empowers large language models with multimodality","author":"Ye","year":"2023"},{"key":"ref77","first-page":"72096","article-title":"Language is not all you need: Aligning perception with language models","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Huang"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02484"},{"key":"ref79","article-title":"LlaVA-next-interleave: Tackling multi-image, video, and 3D in large multimodal models","author":"Li","year":"2024"},{"key":"ref80","article-title":"mPLUG-OWL3: Towards long image-sequence understanding in multi-modal large language models","author":"Ye","year":"2024"},{"key":"ref81","article-title":"EVLM: An efficient vision-language model for visual understanding","author":"Chen","year":"2024"},{"key":"ref82","article-title":"Slow-fast architecture for video multi-modal large language models","author":"Shi","year":"2025"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00272"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref85","first-page":"25278","article-title":"Laion-5b: An open large-scale dataset for training next generation image-text models","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Schuhmann"},{"key":"ref86","article-title":"M $^{3}$3 it: A large-scale dataset towards multi-modal multilingual instruction tuning","author":"Li","year":"2023"},{"key":"ref87","first-page":"26650","article-title":"LAMM: Language-assisted multi-modal instruction-tuning dataset, framework, and benchmark","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Yin"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681249"},{"key":"ref89","article-title":"Visual prompting in multimodal large language models: A survey","author":"Wu","year":"2024"},{"key":"ref90","first-page":"1","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Hu"},{"key":"ref91","first-page":"57018","article-title":"LISA: Layerwise importance sampling for memory-efficient large language model fine-tuning","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Pan"},{"key":"ref92","first-page":"32100","article-title":"DoRA: Weight-decomposed low-rank adaptation","volume-title":"Proc. Int. Conf. Mach. 
Learn.","author":"Liu"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2025.111670"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01229"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73007-8_2"},{"key":"ref96","article-title":"Grounding-prompter: Prompting LLM with multimodal information for temporal sentence grounding in long videos","author":"Chen","year":"2023"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.3390\/app14051894"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.52202\/079017-1278"},{"key":"ref99","article-title":"Infusing environmental captions for long-form video language grounding","author":"Lee","year":"2024"},{"key":"ref100","article-title":"Videolights: Feature refinement and cross-task alignment transformer for joint video highlight detection and moment retrieval","author":"Paul","year":"2024"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2023.3340103"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1109\/tmm.2025.3581797"},{"key":"ref103","article-title":"Vid-morp: Video moment retrieval pretraining from unlabeled videos in the wild","author":"Bao","year":"2024"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i9.32971"},{"key":"ref105","first-page":"76749","article-title":"Self-chained image-language model for video localization and question answering","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Yu"},{"key":"ref106","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW60793.2023.00297"},{"key":"ref107","doi-asserted-by":"publisher","DOI":"10.1109\/LSP.2024.3511426"},{"key":"ref108","article-title":"LlaVA-MR: Large language-and-vision assistant for video moment retrieval","author":"Lu","year":"2024"},{"key":"ref109","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.01284"},{"key":"ref110","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.01285"},{"key":"ref111","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i3.32341"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.acl-long.360"},{"key":"ref113","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.01771"},{"key":"ref114","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73039-9_12"},{"key":"ref115","article-title":"Temporal grounding with time refining video LLM","author":"Wang","year":"2024"},{"key":"ref116","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52734.2025.00803"},{"key":"ref117","article-title":"Timemarker: A versatile video-LLM for long and short video understanding with superior temporal localization ability","author":"Chen","year":"2024"},{"key":"ref118","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i2.32214"},{"key":"ref119","first-page":"81808","article-title":"Slowfocus: Enhancing fine-grained temporal understanding in video LLM","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Nie"},{"key":"ref120","article-title":"The surprising effectiveness of multimodal large language models for video moment retrieval","author":"Meinardus","year":"2024"},{"key":"ref121","article-title":"Video LLMS for temporal reasoning in long videos","author":"Fateh","year":"2024"},{"key":"ref122","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.emnlp-main.556"},{"key":"ref123","first-page":"9804","article-title":"Mitigating the discrepancy between video and text temporal sequences: A time-perception enhanced video grounding method for LLM","volume-title":"Proc. Int. Conf. Comput. Linguistics","author":"Li"},{"key":"ref124","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52734.2025.02781"},{"key":"ref125","article-title":"SpacevLLM: Endowing multimodal large language model with spatio-temporal video grounding capability","author":"Wang","year":"2025"},{"key":"ref126","doi-asserted-by":"publisher","DOI":"10.1126\/stke.2001.99.tw326"},{"key":"ref127","article-title":"Time-R1: Post-training large vision language model for temporal video grounding","author":"Wang","year":"2025"},{"key":"ref128","article-title":"Videoexpert: Augmented LLM for temporal-sensitive video understanding","author":"Zhao","year":"2025"},{"key":"ref129","article-title":"Videochat-R1: Enhancing spatio-temporal perception via reinforcement fine-tuning","author":"Li","year":"2025"},{"key":"ref130","article-title":"MUSEG: Reinforcing video temporal understanding via timestamp-aware multi-segment grounding","author":"Luo","year":"2025"},{"key":"ref131","article-title":"LLAMA: Open and efficient foundation language models","author":"Touvron","year":"2023"},{"key":"ref132","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01842"},{"key":"ref133","article-title":"GPT-4O","year":"2024"},{"key":"ref134","article-title":"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context","author":"Team","year":"2024"},{"key":"ref135","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1410"},{"key":"ref136","article-title":"MiniGPT-V2: Large language model as a unified interface for vision-language multi-task learning","author":"Chen","year":"2023"},{"key":"ref137","article-title":"Baichuan 2: Open large-scale language models","author":"Yang","year":"2023"},{"key":"ref138","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2024.eacl-long.41"},{"key":"ref139","article-title":"Deepseekmath: Pushing the limits of mathematical reasoning in open language models","author":"Shao","year":"2024"},{"key":"ref140","first-page":"23634","article-title":"Merlot: Multimodal neural script knowledge models","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Zellers"},{"key":"ref141","article-title":"InternVid: A large-scale video-text dataset for multimodal understanding and generation","author":"Wang","year":"2023"},{"key":"ref142","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01589"},{"key":"ref143","article-title":"Investigating the catastrophic forgetting in multimodal large language models","author":"Zhai","year":"2023"},{"key":"ref144","doi-asserted-by":"publisher","DOI":"10.1145\/3209978.3210003"},{"key":"ref145","first-page":"28442","article-title":"End-to-end multi-modal video temporal grounding","volume-title":"Proc. Int. Conf. Neural Inf. Process. 
Syst.","author":"Chen"},{"key":"ref146","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2024.3365249"},{"key":"ref147","article-title":"Beats: Audio pre-training with acoustic tokenizers","author":"Chen","year":"2022"},{"key":"ref148","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.1810.04805"},{"key":"ref149","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.563"},{"key":"ref150","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12342"},{"key":"ref151","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46448-0_31"},{"key":"ref152","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298698"},{"key":"ref153","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00965"},{"key":"ref154","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.493"},{"key":"ref155","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58539-6_31"},{"key":"ref156","first-page":"65","article-title":"Meteor: An automatic metric for MT evaluation with improved correlation with human judgments","volume-title":"Proc. Annu. Meeting Assoc. Comput. Linguistics Workshop","author":"Banerjee"},{"key":"ref157","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref158","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298994"},{"key":"ref159","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.432"},{"key":"ref160","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2025.findings-acl.283"},{"key":"ref161","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72952-2_19"},{"key":"ref162","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP49660.2025.10890587"},{"key":"ref163","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i7.28486"},{"key":"ref164","first-page":"28492","article-title":"Robust speech recognition via large-scale weak supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref165","article-title":"Temporal triplane transformers as occupancy world models","author":"Xu","year":"2025"},{"key":"ref166","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00497"},{"key":"ref167","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02208"},{"key":"ref168","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681042"},{"key":"ref169","article-title":"Lvbench: An extreme long video understanding benchmark","author":"Wang","year":"2024"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/11345188\/11184436.pdf?arnumber=11184436","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,1,12]],"date-time":"2026-01-12T22:00:34Z","timestamp":1768255234000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11184436\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,2]]},"references-count":169,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2025.3615586","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,2]]}}}