{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,28]],"date-time":"2026-02-28T10:13:21Z","timestamp":1772273601171,"version":"3.50.1"},"reference-count":81,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U21B2043"],"award-info":[{"award-number":["U21B2043"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62102416"],"award-info":[{"award-number":["62102416"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key Research and Development Program of Jiangsu Province","award":["BE2023016-3"],"award-info":[{"award-number":["BE2023016-3"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Multimedia"],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tmm.2024.3521729","type":"journal-article","created":{"date-parts":[[2024,12,25]],"date-time":"2024-12-25T19:32:25Z","timestamp":1735155145000},"page":"2168-2180","source":"Crossref","is-referenced-by-count":6,"title":["VLAB: Enhancing Video Language Pretraining by Feature Adapting and Blending"],"prefix":"10.1109","volume":"27","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-5396-6253","authenticated-orcid":false,"given":"Xingjian","family":"He","sequence":"first","affiliation":[{"name":"Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"given":"Sihan","family":"Chen","sequence":"additional","affiliation":[{"name":"Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4131-1222","authenticated-orcid":false,"given":"Fan","family":"Ma","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Zhejiang, China"}]},{"given":"Zhicheng","family":"Huang","sequence":"additional","affiliation":[{"name":"University of Science and Technology Beijing, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7850-1353","authenticated-orcid":false,"given":"Xiaojie","family":"Jin","sequence":"additional","affiliation":[{"name":"Bytedance Inc., Beijing, China"}]},{"given":"Zikang","family":"Liu","sequence":"additional","affiliation":[{"name":"Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3918-9448","authenticated-orcid":false,"given":"Dongmei","family":"Fu","sequence":"additional","affiliation":[{"name":"University of Science and Technology Beijing, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0512-880X","authenticated-orcid":false,"given":"Yi","family":"Yang","sequence":"additional","affiliation":[{"name":"College of Computer Science and Technology, Zhejiang University, Zhejiang, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0903-9131","authenticated-orcid":false,"given":"Jing","family":"Liu","sequence":"additional","affiliation":[{"name":"Laboratory of Cognition and Decision Intelligence for Complex Systems, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-6843-0064","authenticated-orcid":false,"given":"Jiashi","family":"Feng","sequence":"additional","affiliation":[{"name":"Bytedance Inc., Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00490"},{"key":"ref2","article-title":"VIOLET: End-to-end video-language transformers with masked visual-token modeling","author":"Fu","year":"2021"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73397-0_4"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00638"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/iccv48922.2021.00175"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02214"},{"key":"ref7","article-title":"CLIP-VIP: Adapting pre-trained image-text model to video-language representation alignment","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Xue","year":"2023"},{"key":"ref8","first-page":"23634","article-title":"MERLOT: Multimodal neural script knowledge models","volume":"34","author":"Zellers","year":"2021","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref9","article-title":"UniVL: A. unified video and language pre-training model for multimodal understanding and generation","author":"Luo","year":"2020"},{"key":"ref10","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"issue":"2","key":"ref11","article-title":"Clip2tv: An empirical study on transformer-based methods for video-text retrieval","volume":"1","author":"Gao","year":"2021"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3479207"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2022.07.028"},{"key":"ref14","article-title":"CLIP2VIDEO: Mastering video-text retrieval via image clip","author":"Fang","year":"2021"},{"key":"ref15","article-title":"Zero-shot video captioning with evolving pseudo-tokens","author":"Tewel","year":"2022"},{"key":"ref16","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume":"35","author":"Alayrac","year":"2022","journal-title":"Adv. Neural Inf. Process. 
Syst."},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2019.2915033"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3090595"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.3026892"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2020.2972830"},{"key":"ref21","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jia","year":"2021"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01857"},{"key":"ref23","article-title":"Simvlm: Simple visual language model pretraining with weak supervision","author":"Wang","year":"2021"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.2307\/jj.1823137.10"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.01838"},{"key":"ref26","first-page":"23318","article-title":"OFA: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wang","year":"2022"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12342"},{"key":"ref28","first-page":"4581","article-title":"Vatex: A large-scale, high-quality multilingual dataset for video-and-language research","volume-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis.","author":"Wang","year":"2019"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/tpami.2024.3479776"},{"key":"ref30","first-page":"7464","article-title":"VideoBERT: A joint model for video and language representation learning","volume-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis.","author":"Sun","year":"2019"},{"key":"ref31","doi-asserted-by":"crossref","DOI":"10.18653\/v1\/2020.emnlp-main.161","article-title":"HERO: Hierarchical encoder for video language omni-representation pre-training","author":"Li","year":"2020"},{"key":"ref32","first-page":"2630","article-title":"Howto100 m: Learning a text-video embedding by watching hundred million narrated video clips","volume-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis.","author":"Miech","year":"2019"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00990"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2023.3237166"},{"key":"ref35","first-page":"970","article-title":"Centerclip: Token clustering for efficient text-video retrieval","volume-title":"Proc. 45th Int. ACM SIGIR Conf. Res. Develop. Inf. Retrieval","author":"Zhao","year":"2022"},{"key":"ref36","first-page":"506","article-title":"Learning multiple visual domains with residual adapters","volume":"30","author":"Rebuffi","year":"2017","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref37","article-title":"LoRA: Low-rank adaptation of large language models","author":"Hu","year":"2021"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00516"},{"key":"ref39","first-page":"26462","article-title":"St-adapter: Parameter-efficient image-to-video transfer learning","volume-title":"Adv. Neural Inf. Process. Syst.","author":"Pan"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00847"},{"key":"ref41","first-page":"16664","article-title":"Adaptformer: Adapting vision transformers for scalable visual recognition","volume-title":"Proc. 36th Int. Conf. Neural Inf. Process. 
Syst.","author":"Chen","year":"2022"},{"key":"ref42","first-page":"202","article-title":"Convolutional bypasses are better vision transformer adapters","volume-title":"Proc. Eur. Conf. Artif. Intell.","author":"Jie","year":"2024"},{"key":"ref43","article-title":"Towards a unified view of parameter-efficient transfer learning","volume-title":"Proc. Int. Conf. Learn. Representations","author":"He","year":"2021"},{"key":"ref44","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. 39th Int. Conf. Mach. Learn.","author":"Li","year":"2022"},{"key":"ref45","article-title":"CoCa: Contrastive captioners are image-text foundation models","author":"Yu","year":"2022"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/n19-1423"},{"key":"ref47","first-page":"13063","article-title":"Unified language model pre-training for natural language understanding and generation","volume":"32","author":"Dong","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref49","first-page":"1143","article-title":"Im2text: Describing images using 1 million captioned photographs","volume":"24","author":"Ordonez","year":"2011","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref50","doi-asserted-by":"crossref","first-page":"32","DOI":"10.1007\/s11263-016-0981-7","article-title":"Visual genome: Connecting language and vision using crowdsourced dense image annotations","volume":"123","author":"Krishna","year":"2017","journal-title":"Int. J. Comput. Vis."},{"key":"ref51","article-title":"Microsoft COCO captions: Data collection and evaluation server","author":"Chen","year":"2015"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00356"},{"key":"ref53","article-title":"Improving video-text retrieval by multi-stream corpus alignment and dual softmax loss","author":"Cheng","year":"2021"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01855"},{"key":"ref55","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018"},{"key":"ref56","article-title":"CLOVER: Towards a unified video-language alignment and fusion model","author":"Huang","year":"2022"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01413"},{"key":"ref58","first-page":"5696","article-title":"OmniVL: One foundation model for image-language and video-language tasks","volume-title":"Proc. 36th Int. Conf. Neural Inf. Process. Syst.","author":"Wang","year":"2024"},{"key":"ref59","first-page":"38728","article-title":"mPLUG-2: A modularized multi-modal foundation model across text, image and video","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Xu","year":"2023"},{"key":"ref60","article-title":"Revealing single frame bias for video-and-language learning","author":"Lei","year":"2022"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01034"},{"key":"ref62","first-page":"124","article-title":"Zero-shot video question answering via frozen bidirectional language models","volume-title":"Adv. Neural Inf. Process. 
Syst.","volume":"35","author":"Yang","year":"2022"},{"key":"ref63","article-title":"Video-text modeling with zero-shot transfer from contrastive captioners","author":"Yan","year":"2022"},{"key":"ref64","article-title":"InternVideo: General video foundation models via generative and discriminative learning","author":"Wang","year":"2022"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123427"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.618"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.149"},{"key":"ref69","first-page":"311","article-title":"Bleu: A method for automatic evaluation of machine translation","volume-title":"Proc. 40th Annu. meeting Assoc. Comput. Linguistics","author":"Papineni","year":"2002"},{"key":"ref70","first-page":"65","article-title":"Meteor: An automatic metric for mt evaluation with improved correlation with human judgments","volume-title":"Proc. ACL Workshop Intrinsic Extrinsic Eval. Measures Mach. Transl. Summarization","author":"Banerjee","year":"2005"},{"key":"ref71","first-page":"74","article-title":"ROUGE: A package for automatic evaluation of summaries","volume-title":"Text Summarization Branches Out","author":"Lin","year":"2004"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01742"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01743"},{"key":"ref75","article-title":"Disentangled representation learning for text-video retrieval","author":"Wang","year":"2022"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19781-9_19"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547910"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01136"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"ref80","first-page":"26462","article-title":"ST-adapter: Parameter-efficient image-to-video transfer learning","volume":"35","author":"Pan","year":"2022","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"ref81","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","author":"Li","year":"2023"}],"container-title":["IEEE Transactions on Multimedia"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6046\/10844992\/10814098.pdf?arnumber=10814098","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,7]],"date-time":"2025-04-07T21:56:17Z","timestamp":1744062977000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10814098\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":81,"URL":"https:\/\/doi.org\/10.1109\/tmm.2024.3521729","relation":{},"ISSN":["1520-9210","1941-0077"],"issn-type":[{"value":"1520-9210","type":"print"},{"value":"1941-0077","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}