{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:13:17Z","timestamp":1763190797016,"version":"3.45.0"},"reference-count":40,"publisher":"IEEE","license":[{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,6,30]],"date-time":"2025-06-30T00:00:00Z","timestamp":1751241600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025,6,30]]},"DOI":"10.1109\/ijcnn64981.2025.11227554","type":"proceedings-article","created":{"date-parts":[[2025,11,14]],"date-time":"2025-11-14T18:46:15Z","timestamp":1763145975000},"page":"1-8","source":"Crossref","is-referenced-by-count":0,"title":["GCL-Vid: Efficient Video Foundation Model via Generative and Contrastive Encoding with LLMs"],"prefix":"10.1109","author":[{"given":"Wanjing","family":"Xie","sequence":"first","affiliation":[{"name":"Central South University,School of Computer Science and Engineering,Changsha,China"}]},{"given":"Lingyan","family":"Zhang","sequence":"additional","affiliation":[{"name":"Central South University,School of Computer Science and Engineering,Changsha,China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52733.2024.01506"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00572"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72949-2_21"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2102.05095"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19772-7_1"},{"article-title":"Actionclip: A new paradigm for video action recognition","year":"2021","author":"Wang","key":"ref7"},{"key":"ref8","first-page":"10078","article-title":"Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training","author":"Tong","year":"2022","journal-title":"Advances in neural information processing systems(NIPS)"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"article-title":"Uniformer: Unified transformer for efficient spatiotemporal representation learning","volume-title":"Proceedings of the International Conference on Learning Representations (ICLR)","author":"Li","key":"ref10"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/iccv51070.2023.00157"},{"key":"ref12","first-page":"19730","article-title":"Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","volume-title":"International conference on machine learning","author":"Li"},{"author":"Wang","key":"ref13","article-title":"Internvideo: General video foundation models via generative and discriminative learning"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-73013-9_23"},{"key":"ref15","article-title":"Attention is all you need","author":"Vaswani","year":"2017","journal-title":"Advances in Neural Information Processing Systems(NIPS)."},{"key":"ref16","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International conference on 
machine learning","author":"Radford"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02588"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00556"},{"key":"ref19","first-page":"1877","article-title":"Language models are few-shot learners","author":"Brown","year":"2020","journal-title":"Advances in neural information processing systems(NIPS)."},{"article-title":"Gpt-4 technical report","year":"2023","author":"Achiam","key":"ref20"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00756"},{"article-title":"The kinetics human action video dataset","year":"2017","author":"Kay","key":"ref22"},{"article-title":"UCF101: A dataset of 101 human actions classes from videos in the wild","year":"2012","author":"Soomro","key":"ref23"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.786"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.02095"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00630"},{"article-title":"Multi-modal self-supervision from generalized data transformations","year":"2020","author":"Patrick","key":"ref28"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00331"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475572"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.3390\/signals2030037"},{"article-title":"Llava-next: Improved reasoning, ocr, and world knowledge","year":"2024","author":"Liu","key":"ref32"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00675"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00718"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00813"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.544"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00756"},{"key":"ref39","first-page":"23634","article-title":"Merlot: Multimodal neural script knowledge models","author":"Zellers","year":"2021","journal-title":"Advances in neural information processing systems(NIPS)."},{"key":"ref40","first-page":"35946","article-title":"Masked autoencoders as spatiotemporal learners","author":"Feichtenhofer","year":"2022","journal-title":"Advances in neural information processing systems(NIPS)."}],"event":{"name":"2025 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2025,6,30]]},"location":"Rome, Italy","end":{"date-parts":[[2025,7,5]]}},"container-title":["2025 International Joint Conference on Neural Networks (IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/11227166\/11227148\/11227554.pdf?arnumber=11227554","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,15]],"date-time":"2025-11-15T07:10:13Z","timestamp":1763190613000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11227554\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,6,30]]},"references-count":40,"URL":"https:\/\/doi.org\/10.1109\/ijcnn64981.2025.11227554","relation":{},"subject":[],"published":{"date-parts":[[2025,6,30]]}}}