{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,14]],"date-time":"2026-04-14T09:02:07Z","timestamp":1776157327080,"version":"3.50.1"},"reference-count":63,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"4","license":[{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,4,1]],"date-time":"2024-04-01T00:00:00Z","timestamp":1711929600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62176137"],"award-info":[{"award-number":["62176137"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62006140"],"award-info":[{"award-number":["62006140"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100020196","name":"Shandong Provincial Natural Science and Foundation","doi-asserted-by":"publisher","award":["ZR2020QF106"],"award-info":[{"award-number":["ZR2020QF106"]}],"id":[{"id":"10.13039\/501100020196","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100018735","name":"Ant Group","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100018735","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. 
Video Technol."],"published-print":{"date-parts":[[2024,4]]},"DOI":"10.1109\/tcsvt.2023.3303945","type":"journal-article","created":{"date-parts":[[2023,8,10]],"date-time":"2023-08-10T17:50:07Z","timestamp":1691689807000},"page":"2525-2535","source":"Crossref","is-referenced-by-count":5,"title":["SNP-S<sup>3<\/sup>: Shared Network Pre-Training and Significant Semantic Strengthening for Various Video-Text Tasks"],"prefix":"10.1109","volume":"34","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-0245-9064","authenticated-orcid":false,"given":"Xingning","family":"Dong","sequence":"first","affiliation":[{"name":"School of Computer Science and Technology, Shandong University, Qingdao, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-0521-9664","authenticated-orcid":false,"given":"Qingpei","family":"Guo","sequence":"additional","affiliation":[{"name":"Ant Group Company Ltd., Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3197-5698","authenticated-orcid":false,"given":"Tian","family":"Gan","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Shandong University, Qingdao, China"}]},{"given":"Qing","family":"Wang","sequence":"additional","affiliation":[{"name":"Ant Group Company Ltd., Hangzhou, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0247-5221","authenticated-orcid":false,"given":"Jianlong","family":"Wu","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Harbin Institute of Technology (Shenzhen), Shenzhen, China"}]},{"given":"Xiangyuan","family":"Ren","sequence":"additional","affiliation":[{"name":"School of Computer Science and Technology, Shandong University, Qingdao, China"}]},{"given":"Yuan","family":"Cheng","sequence":"additional","affiliation":[{"name":"Artificial Intelligence Innovation and Incubation (AI3) Institute, Fudan University, Shanghai, China"}]},{"given":"Wei","family":"Chu","sequence":"additional","affiliation":[{"name":"Ant Group Company Ltd., Hangzhou, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/TASLP.2021.3124365"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1145\/3566126"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3164467"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3054525"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1145\/3473140"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/3585388"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3150959"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2018.2807588"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3090595"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2020.2995959"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2020.3048440"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2008.2002831"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01113"},{"key":"ref14","article-title":"CLEVRER: Collision events for video representation and reasoning","author":"Yi","year":"2019","journal-title":"arXiv:1910.01442"},{"key":"ref15","first-page":"1","article-title":"Star: A benchmark for situated reasoning in real-world videos","volume-title":"Proc. 35th Conf. Neural Inf. Process. Syst. 
Datasets Benchmarks Track","author":"Wu"},{"key":"ref16","article-title":"Comphy: Compositional physical reasoning of objects and events from videos","author":"Chen","year":"2022","journal-title":"arXiv:2205.01089"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01589"},{"key":"ref18","article-title":"VALUE: A multi-task benchmark for video-and-language understanding evaluation","author":"Li","year":"2021","journal-title":"arXiv:2106.04632"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3551581"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.370"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.161"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3475703"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00725"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01569"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref26","article-title":"CLIP2Video: Mastering video-text retrieval via image CLIP","author":"Fang","year":"2021","journal-title":"arXiv:2106.11097"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00490"},{"key":"ref28","article-title":"VIOLET: End-to-end video-language transformers with masked visual-token modeling","author":"Fu","year":"2021","journal-title":"arXiv:2111.12681"},{"key":"ref29","article-title":"CLIP4Clip: An empirical study of CLIP for end to end video clip retrieval","author":"Luo","year":"2021","journal-title":"arXiv:2104.08860"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3165934"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3172971"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3207910"},{"key":"ref33","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv:1810.04805"},{"key":"ref34","article-title":"Pixel-BERT: Aligning image pixels with text by deep multi-modal transformers","author":"Huang","year":"2020","journal-title":"arXiv:2004.00849"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3235704"},{"key":"ref36","article-title":"UniVL: A unified video and language pre-training model for multimodal understanding and generation","author":"Luo","year":"2020","journal-title":"arXiv:2002.06353"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00877"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01267-0_19"},{"key":"ref39","article-title":"DistilBERT, a distilled version of BERT: Smaller, faster, cheaper and lighter","author":"Sanh","year":"2019","journal-title":"arXiv:1910.01108"},{"key":"ref40","article-title":"Object-aware video-language pre-training for retrieval","author":"Jinpeng Wang","year":"2021","journal-title":"arXiv:2112.00656"},{"key":"ref41","first-page":"23634","article-title":"MERLOT: Multimodal neural script knowledge models","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"34","author":"Zellers"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01136"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-01234-2_29"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.618"},{"key":"ref51","first-page":"190","article-title":"Collecting highly parallel data for paraphrase evaluation","volume-title":"Proc. Annu. Meeting Assoc. Comput. Linguistics, Hum. Lang. Technol.","author":"Chen"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123427"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00320"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.11"},{"key":"ref56","article-title":"Multimodal pretraining for dense video captioning","author":"Huang","year":"2020","journal-title":"arXiv:2011.11760"},{"key":"ref57","article-title":"Use what you have: Video retrieval using representations from collaborative experts","author":"Liu","year":"2019","journal-title":"arXiv:1907.13487"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00999"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3097171"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i8.16822"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01660"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00171"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.74"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/76\/10492617\/10214396.pdf?arnumber=10214396","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,4,9]],"date-time":"2024-04-09T19:54:02Z","timestamp":1712692442000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10214396\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,4]]},"references-count":63,"journal-issue":{"issue":"4"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2023.3303945","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,4]]}}}