{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,10,31]],"date-time":"2025-10-31T08:04:00Z","timestamp":1761897840899,"version":"3.37.3"},"reference-count":55,"publisher":"IEEE","license":[{"start":{"date-parts":[[2023,6,1]],"date-time":"2023-06-01T00:00:00Z","timestamp":1685577600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,6,1]],"date-time":"2023-06-01T00:00:00Z","timestamp":1685577600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62171248"],"award-info":[{"award-number":["62171248"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023,6]]},"DOI":"10.1109\/cvpr52729.2023.02210","type":"proceedings-article","created":{"date-parts":[[2023,8,22]],"date-time":"2023-08-22T17:30:52Z","timestamp":1692725452000},"page":"23079-23089","source":"Crossref","is-referenced-by-count":3,"title":["Learning Transferable Spatiotemporal Representations from Natural Script Knowledge"],"prefix":"10.1109","author":[{"given":"Ziyun","family":"Zeng","sequence":"first","affiliation":[{"name":"Tsinghua University"}]},{"given":"Yuying","family":"Ge","sequence":"additional","affiliation":[{"name":"The University of Hong Kong"}]},{"given":"Xihui","family":"Liu","sequence":"additional","affiliation":[{"name":"The University of Hong Kong"}]},{"given":"Bin","family":"Chen","sequence":"additional","affiliation":[{"name":"Harbin Institute of Technology,Shenzhen"}]},{"given":"Ping","family":"Luo","sequence":"additional","affiliation":[{"name":"The University of Hong Kong"}]},{"given":"Shu-Tao","family":"Xia","sequence":"additional","affiliation":[{"name":"Tsinghua University"}]},{"given":"Yixiao","family":"Ge","sequence":"additional","affiliation":[{"name":"Applied Research Center (ARC), Tencent PCG"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i8.16822"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.618"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00911"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2102.05095"},{"key":"ref7","first-page":"190","article-title":"Collecting highly parallel data for paraphrase evaluation","volume-title":"Proceedings of the 49th annual meeting of the association for computational linguistics: human language technologies","author":"Chen","year":"2011"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i2.16189"},{"key":"ref9","first-page":"1597","article-title":"A simple framework for contrastive learning of visual representations","volume-title":"International conference on machine learning","author":"Chen","year":"2020"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00950"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2022.103406"},{"key":"ref12","article-title":"Bert: Pre-training of deep bidirectional transformers for language understanding","author":"Devlin","year":"2018","journal-title":"arXiv preprint"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00153"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW56347.2022.00458"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.48550\/arXiv.2010.11929"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00675"},{"key":"ref17","article-title":"Masked autoencoders as spatiotemporal learners","author":"Feichtenhofer","year":"2022","journal-title":"arXiv preprint"},{"key":"ref18","article-title":"Violet: End-to-end video-language transformers with masked visual-token modeling","author":"Fu","year":"2021","journal-title":"arXiv preprint"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58548-8_13"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01569"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_40"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.622"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01553"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00784"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00799"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01367"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00982"},{"key":"ref30","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"International Conference on Machine Learning","author":"Jia","year":"2021"},{"key":"ref31","article-title":"The kinetics human action video dataset","author":"Kay","year":"2017","journal-title":"arXiv preprint"},{"key":"ref32","first-page":"8046","article-title":"Relational self-attention: What's missing in attention for video understanding","volume":"34","author":"Kim","year":"2021","journal-title":"Advances in Neural Information Processing Systems"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2011.6126543"},{"key":"ref34","article-title":"Revealing single frame bias for video-and-language learning","author":"Lei","year":"2022","journal-title":"arXiv preprint"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00725"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00718"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00813"},{"key":"ref38","article-title":"Use what you have: Video retrieval using representations from collaborative experts","author":"Liu","year":"2019","journal-title":"arXiv preprint"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01345"},{"key":"ref40","article-title":"Representation learning with contrastive predictive coding","author":"van den Oord","year":"2018","journal-title":"arXiv preprint"},{"key":"ref41","article-title":"Support-set bottlenecks for video-text representation learning","author":"Patrick","year":"2020","journal-title":"arXiv preprint"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00689"},{"key":"ref43","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"International Conference on Machine Learning","author":"Radford","year":"2021"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00289"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298940"},{"key":"ref46","article-title":"Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter","author":"Sanh","year":"2019","journal-title":"arXiv preprint"},{"key":"ref47","article-title":"Is a caption worth a thousand images? a controlled study for representation learning","author":"Santurkar","year":"2022","journal-title":"arXiv preprint"},{"key":"ref48","article-title":"UCF101: A dataset of 101 human actions classes from videos in the wild","author":"Soomro","year":"2012","journal-title":"arXiv preprint"},{"key":"ref49","article-title":"Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training","author":"Tong","year":"2022","journal-title":"arXiv preprint"},{"key":"ref50","article-title":"Omnivl: One foundation model for image-language and video-language tasks","author":"Wang","year":"2022","journal-title":"arXiv preprint"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01163"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46484-8_2"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01058"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref55","first-page":"23634","article-title":"Merlot: Multimodal neural script knowledge models","volume":"34","author":"Zellers","year":"2021","journal-title":"Advances in Neural Information Processing Systems"}],"event":{"name":"2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)","start":{"date-parts":[[2023,6,17]]},"location":"Vancouver, BC, Canada","end":{"date-parts":[[2023,6,24]]}},"container-title":["2023 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/10203037\/10203050\/10204285.pdf?arnumber=10204285","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,1]],"date-time":"2024-03-01T17:23:38Z","timestamp":1709313818000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10204285\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,6]]},"references-count":55,"URL":"https:\/\/doi.org\/10.1109\/cvpr52729.2023.02210","relation":{},"subject":[],"published":{"date-parts":[[2023,6]]}}}