{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,19]],"date-time":"2026-01-19T05:28:54Z","timestamp":1768800534883,"version":"3.49.0"},"reference-count":82,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100012166","name":"National Key Research and Development Program of China","doi-asserted-by":"publisher","award":["2018AAA0102200"],"award-info":[{"award-number":["2018AAA0102200"]}],"id":[{"id":"10.13039\/501100012166","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62122018"],"award-info":[{"award-number":["62122018"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U22A2097"],"award-info":[{"award-number":["U22A2097"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62020106008"],"award-info":[{"award-number":["62020106008"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61872064"],"award-info":[{"award-number":["61872064"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004806","name":"Fok Ying Tong Education Foundation","doi-asserted-by":"publisher","award":["171106"],"award-info":[{"award-number":["171106"]}],"id":[{"id":"10.13039\/501100004806","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. 
on Image Process."],"published-print":{"date-parts":[[2023]]},"DOI":"10.1109\/tip.2023.3275071","type":"journal-article","created":{"date-parts":[[2023,5,15]],"date-time":"2023-05-15T18:59:20Z","timestamp":1684177160000},"page":"5017-5030","source":"Crossref","is-referenced-by-count":18,"title":["End-to-End Pre-Training With Hierarchical Matching and Momentum Contrast for Text-Video Retrieval"],"prefix":"10.1109","volume":"32","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-5136-5865","authenticated-orcid":false,"given":"Wenxue","family":"Shen","sequence":"first","affiliation":[{"name":"Center for Future Media, School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2549-8322","authenticated-orcid":false,"given":"Jingkuan","family":"Song","sequence":"additional","affiliation":[{"name":"Shenzhen Institute for Advanced Study, University of Electronic Science and Technology of China, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7728-2518","authenticated-orcid":false,"given":"Xiaosu","family":"Zhu","sequence":"additional","affiliation":[{"name":"Center for Future Media, School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China"}]},{"given":"Gongfu","family":"Li","sequence":"additional","affiliation":[{"name":"Corporate Development Group, Tencent Inc., Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2999-2088","authenticated-orcid":false,"given":"Heng Tao","family":"Shen","sequence":"additional","affiliation":[{"name":"Center for Future Media, School of Computer Science and Engineering, University of Electronic Science and Technology of China, Chengdu, China"}]}],"member":"263","reference":[{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW53098.2021.00374"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.3390\/app12136753"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00504"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00950"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01170"},{"key":"ref58","article-title":"Improved baselines with momentum contrastive learning","author":"chen","year":"2020","journal-title":"arXiv 2003 04297"},{"key":"ref53","first-page":"21271","article-title":"Bootstrap your own latent&#x2014;A new approach to self-supervised learning","author":"grill","year":"2020","journal-title":"Proc NIPS"},{"key":"ref52","first-page":"1597","article-title":"A simple framework for contrastive learning of visual representations","author":"chen","year":"2020","journal-title":"Proc ICML"},{"key":"ref11","article-title":"Improving video-text retrieval by multi-stream corpus alignment and dual softmax loss","author":"cheng","year":"2021","journal-title":"arXiv 2109 
04290"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00393"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01939"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01549"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475431"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00990"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2018.2814344"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3120867"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00637"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2006.100"},{"key":"ref45","first-page":"22605","article-title":"COOT: Cooperative hierarchical transformer for video-text representation learning","author":"ging","year":"2020","journal-title":"Proc NIPS"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3147032"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2021.3130536"},{"key":"ref42","first-page":"1","article-title":"An image is worth 16&#x00D7;16 words: Transformers for image recognition at scale","author":"dosovitskiy","year":"2021","journal-title":"Proc ICLR"},{"key":"ref41","first-page":"1877","article-title":"Language models are few-shot learners","author":"brown","year":"2020","journal-title":"Proc NIPS"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00272"},{"key":"ref43","article-title":"Learning video representations using contrastive bidirectional transformer","author":"sun","year":"2019","journal-title":"arXiv 1906 05743"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3181496"},{"key":"ref8","article-title":"CLIP2Video: Mastering video-text retrieval via image CLIP","author":"fang","year":"2021","journal-title":"arXiv 2106 11097"},{"key":"ref7","article-title":"CLIP2TV: Align, match and distill for video-text retrieval","author":"gao","year":"2021","journal-title":"arXiv 2111 05610"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01065"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-77004-4_1"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2022.07.028"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00725"},{"key":"ref5","first-page":"1","article-title":"Support-set bottlenecks for video-text representation learning","author":"patrick","year":"2021","journal-title":"Proc ICLR"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01105"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00957"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2022\/773"},{"key":"ref80","article-title":"Unifying visual-semantic embeddings with multimodal neural language models","author":"kiros","year":"2014","journal-title":"arXiv 1411 
2539"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TBDATA.2019.2921572"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.195"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/3477495.3531950"},{"key":"ref78","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01138"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2021.08.002"},{"key":"ref36","first-page":"825","article-title":"An investigation of practical approximate nearest neighbor algorithms","author":"liu","year":"2004","journal-title":"Proc NIPS"},{"key":"ref31","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","author":"radford","year":"2021","journal-title":"Proc ICML"},{"key":"ref75","article-title":"Decoupled weight decay regularization","author":"loshchilov","year":"2018","journal-title":"Proc ICLR"},{"key":"ref30","first-page":"4171","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"kenton","year":"2019","journal-title":"Proc NAACL-HLT"},{"key":"ref74","first-page":"2443","article-title":"Supervision exists everywhere: A data efficient contrastive language-image pre-training paradigm","author":"li","year":"2021","journal-title":"Proc ICLR"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2022.3204444"},{"key":"ref77","first-page":"279","article-title":"Use what you have: Video retrieval using representations from collaborative experts","author":"liu","year":"2019","journal-title":"Proc BMVC"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547910"},{"key":"ref76","first-page":"9346","article-title":"SGDR: Stochastic gradient descent with warm restarts","author":"loshchilov","year":"2017","journal-title":"Proc ICLR"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58548-8_13"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00495"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/s11633-022-1369-5"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1016\/j.aiopen.2022.01.001"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3124365"},{"key":"ref70","article-title":"RoBERTa: A robustly optimized BERT pretraining approach","author":"liu","year":"2019","journal-title":"arXiv 1907 11692"},{"key":"ref73","first-page":"9","article-title":"Language models are unsupervised multitask learners","volume":"1","author":"radford","year":"2019","journal-title":"OpenAIRE blog"},{"key":"ref72","first-page":"5998","article-title":"Attention is all you need","author":"vaswani","year":"2017","journal-title":"Proc NIPS"},{"key":"ref24","first-page":"24206","article-title":"VATT: Transformers for multimodal self-supervised learning from raw video, audio and text","author":"akbari","year":"2021","journal-title":"Proc NIPS"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_29"},{"key":"ref23","article-title":"UniVL: A unified video and language pre-training model for multimodal understanding and generation","author":"luo","year":"2020","journal-title":"arXiv 2002 
06353"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00468"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.naacl-main.193"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.370"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.162"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/s11704-021-1248-1"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.83"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0987-1"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00756"},{"key":"ref66","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TIP.2022.3205212"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.502"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.161"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00877"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.544"},{"key":"ref60","article-title":"Representation learning with contrastive predictive coding","author":"van den oord","year":"2018","journal-title":"arXiv 1807 03748"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12342"},{"key":"ref61","article-title":"Self-supervised learning with Swin transformers","author":"xie","year":"2021","journal-title":"arXiv 2105 04553"}],"container-title":["IEEE Transactions on Image Processing"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/83\/9991910\/10124819.pdf?arnumber=10124819","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,10,2]],"date-time":"2023-10-02T18:13:50Z","timestamp":1696270430000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10124819\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"references-count":82,"URL":"https:\/\/doi.org\/10.1109\/tip.2023.3275071","relation":{},"ISSN":["1057-7149","1941-0042"],"issn-type":[{"value":"1057-7149","type":"print"},{"value":"1941-0042","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]}}}