{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,10]],"date-time":"2026-03-10T11:35:00Z","timestamp":1773142500713,"version":"3.50.1"},"reference-count":92,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["82441024"],"award-info":[{"award-number":["82441024"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62202034"],"award-info":[{"award-number":["62202034"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Beijing Natural Science Foundation","award":["4242044"],"award-info":[{"award-number":["4242044"]}]},{"DOI":"10.13039\/501100004750","name":"Aeronautical Science Foundation of China","doi-asserted-by":"publisher","award":["2023Z071051002"],"award-info":[{"award-number":["2023Z071051002"]}],"id":[{"id":"10.13039\/501100004750","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. 
on Image Process."],"published-print":{"date-parts":[[2025]]},"DOI":"10.1109\/tip.2025.3583168","type":"journal-article","created":{"date-parts":[[2025,7,15]],"date-time":"2025-07-15T17:44:38Z","timestamp":1752601478000},"page":"4500-4514","source":"Crossref","is-referenced-by-count":3,"title":["Cross-Modal Contrastive Masked AutoEncoder for Compressed Video Pre-Training"],"prefix":"10.1109","volume":"34","author":[{"given":"Bing","family":"Li","sequence":"first","affiliation":[{"name":"State Key Laboratory of Complex and Critical Software Environment and the School of Computer Science and Engineering, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0112-4166","authenticated-orcid":false,"given":"Jiaxin","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0000-1016-5528","authenticated-orcid":false,"given":"Guohao","family":"Li","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Complex and Critical Software Environment and the School of Computer Science and Engineering, Beihang University, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1237-7177","authenticated-orcid":false,"given":"Dongming","family":"Zhang","sequence":"additional","affiliation":[{"name":"National Computer Network Emergency Response Technical Team, Coordination Center of China, Beijing, China"}]},{"given":"Xiuguo","family":"Bao","sequence":"additional","affiliation":[{"name":"National Computer Network Emergency Response Technical Team, Coordination Center of China, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2412-9330","authenticated-orcid":false,"given":"Di","family":"Huang","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Complex and Critical Software Environment and the School of Computer Science and Engineering, Beihang University, Beijing, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19830-4_28"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/174"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547761"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00140"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3231082"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3101826"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3092949"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3055063"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00720"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58604-1_20"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2021.103188"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/s11704-023-3284-5"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00631"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00136"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413641"},{"key":"ref16","first-page":"14176","article-title":"Compressed video contrastive learning","volume-title":"Proc. Adv. Neural Inform. Process. Syst. 
(NeurIPS)","volume":"34","author":"Huo"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/148"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1007\/s11704-024-40027-3"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/s11704-023-2563-5"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3283282"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/MMSP.2018.8547120"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1007\/3-540-45453-5_145"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00165"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01240-3_21"},{"key":"ref25","article-title":"Masked autoencoders as spatiotemporal learners","author":"Feichtenhofer","year":"2022","journal-title":"arXiv:2205.09113"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12331"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00211"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01343"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_27"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3211472"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3215899"},{"key":"ref32","first-page":"16815","article-title":"VideoMAE: Masked autoencoders are data-efficient learners for self-supervised video pre-training","volume-title":"Proc. Adv. Neural Inform. Process. Syst. (NeurIPS)","volume":"35","author":"Zhan"},{"key":"ref33","first-page":"1","article-title":"Self-supervised learning of compressed video representations","volume-title":"Proc. Int. Conf. Learn. Represent. (ICLR)","author":"Yu"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3112008"},{"key":"ref35","article-title":"An image is worth 16\u00d716 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020","journal-title":"arXiv:2010.11929"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00086"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01058"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6840"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00629"},{"key":"ref40","first-page":"843","article-title":"Unsupervised learning of video representations using LSTMs","volume-title":"Proc. 32nd Int. Conf. Int. Conf. Mach. Learn.","volume":"37","author":"Srivastava"},{"key":"ref41","article-title":"Self-supervised spatiotemporal feature learning via video rotation prediction","author":"Jing","year":"2018","journal-title":"arXiv:1811.11387"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00994"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58520-4_30"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33018545"},{"key":"ref45","first-page":"19545","article-title":"Space-time correspondence as a contrastive random walk","volume-title":"Proc. Adv. Neural Inform. Process. Syst. 
(NeurIPS)","author":"Jabri"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00190"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00331"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00689"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3130536"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3147032"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3207577"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1007\/s11704-024-3806-9"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1007\/s11704-023-2180-3"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01727"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01364"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i3.25375"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW.2019.00186"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58580-8_19"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00949"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00799"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/ICCVW54120.2021.00358"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01562"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3224877"},{"key":"ref64","first-page":"5679","article-title":"Self-supervised co-training for video representation learning","volume-title":"Proc. Adv. Neural Inform. Process. Syst. (NeurIPS)","author":"Han"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547783"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00452"},{"key":"ref67","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref68","article-title":"BEiT: BERT pre-training of image transformers","author":"Bao","year":"2021","journal-title":"arXiv:2106.08254"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i1.25130"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01432"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01426"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19836-6_20"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00275"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00210"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.00263"},{"key":"ref77","article-title":"Compressed video action recognition with refined motion vector","author":"Cao","year":"2019","journal-title":"arXiv:1910.02533"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00350"},{"key":"ref79","article-title":"Mobile video action recognition","author":"Huo","year":"2019","journal-title":"arXiv:1908.10155"},{"key":"ref80","article-title":"Flow-distilled IP two-stream networks for compressed video action recognition","author":"Huang","year":"2019","journal-title":"arXiv:1912.04462"},{"key":"ref81","first-page":"8792","article-title":"Generalized cross entropy loss for training deep neural networks with noisy 
labels","volume-title":"Proc. 32nd Int. Conf. Neural Inf. Process. Syst. (NIPS)","volume":"31","author":"Zhang"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1109\/TCI.2016.2644865"},{"key":"ref83","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413694"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00153"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2022.3160860"},{"key":"ref86","article-title":"Decoupled weight decay regularization","author":"Loshchilov","year":"2017","journal-title":"arXiv:1711.05101"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i2.16189"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3057833"},{"key":"ref89","article-title":"Masked motion encoding for self-supervised video representation learning","author":"Sun","year":"2022","journal-title":"arXiv:2210.06096"},{"key":"ref90","article-title":"Self-supervised video representation learning with motion-aware masked autoencoders","author":"Yang","year":"2022","journal-title":"arXiv:2210.04154"},{"key":"ref91","article-title":"Improved baselines with momentum contrastive learning","author":"Chen","year":"2020","journal-title":"arXiv:2003.04297"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/83\/10795784\/11080224.pdf?arnumber=11080224","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,7,26]],"date-time":"2025-07-26T06:31:59Z","timestamp":1753511519000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11080224\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"references-count":92,"URL":"https:\/\/doi.org\/10.1109\/tip.2025.3583168","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"value":"1057-7149","type":"print"},{"value":"1941-0042","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]}}}