{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,12,30]],"date-time":"2025-12-30T23:49:41Z","timestamp":1767138581907,"version":"build-2238731810"},"publisher-location":"Cham","reference-count":63,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031198328","type":"print"},{"value":"9783031198335","type":"electronic"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-19833-5_36","type":"book-chapter","created":{"date-parts":[[2022,11,3]],"date-time":"2022-11-03T20:40:30Z","timestamp":1667508030000},"page":"616-633","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["VTC: Improving Video-Text Retrieval with User Comments"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3423-9373","authenticated-orcid":false,"given":"Laura","family":"Hanu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8410-2570","authenticated-orcid":false,"given":"James","family":"Thewlis","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8533-4020","authenticated-orcid":false,"given":"Yuki M.","family":"Asano","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3994-8045","authenticated-orcid":false,"given":"Christian","family":"Rupprecht","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,4]]},"reference":[{"key":"36_CR1","unstructured":"Alwassel, H., Korbar, B., Mahajan, D., Torresani, L., Ghanem, B., Tran, D.: Self-supervised learning by cross-modal audio-video clustering. In: NeurIPS (2020)"},{"key":"36_CR2","unstructured":"Asano, Y.M., Patrick, M., Rupprecht, C., Vedaldi, A.: Labelling unlabelled videos from scratch with multi-modal self-supervision. In: NeurIPS (2020)"},{"key":"36_CR3","unstructured":"Asano, Y.M., Rupprecht, C., Zisserman, A., Vedaldi, A.: Pass: an imagenet replacement for self-supervised pretraining without humans. In: Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1) (2021)"},{"key":"36_CR4","doi-asserted-by":"crossref","unstructured":"Bain, M., Nagrani, A., Varol, G., Zisserman, A.: Frozen in time: a joint video and image encoder for end-to-end retrieval. arXiv preprint arXiv:2104.00650 (2021)","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"36_CR5","unstructured":"Bertasius, G., Wang, H., Torresani, L.: Is space-time attention all you need for video understanding? arXiv preprint arXiv:2102.05095 (2021)"},{"key":"36_CR6","unstructured":"Carreira, J., Noland, E., Hillier, C., Zisserman, A.: A short note on the kinetics-700 human action dataset. arXiv preprint arXiv:1907.06987 (2019)"},{"key":"36_CR7","doi-asserted-by":"crossref","unstructured":"Carreira, J., Zisserman, A.: Quo vadis, action recognition? a new model and the kinetics dataset. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
6299\u20136308 (2017)","DOI":"10.1109\/CVPR.2017.502"},{"key":"36_CR8","doi-asserted-by":"crossref","unstructured":"Chen, H., Xie, W., Vedaldi, A., Zisserman, A.: VGGsound: a large-scale audio-visual dataset. In: ICASSP 2020\u20132020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 721\u2013725. IEEE (2020)","DOI":"10.1109\/ICASSP40776.2020.9053174"},{"key":"36_CR9","unstructured":"Chen, M., Radford, A., Child, R., Wu, J., Jun, H.: Generative pretraining from pixels. In: ICML (2020)"},{"key":"36_CR10","unstructured":"Chen, Y.C., et al.: Uniter: learning universal image-text representations. arXiv preprint arXiv:1909.11740 (2019)"},{"key":"36_CR11","unstructured":"Clark, K., Luong, M.T., Le, Q.V., Manning, C.D.: ELECTRA: pre-training text encoders as discriminators rather than generators. In: ICLR (2020)"},{"key":"36_CR12","doi-asserted-by":"crossref","unstructured":"Desai, K., Johnson, J.: Virtex: learning visual representations from textual annotations. arXiv preprint arXiv:2006.06666 (2020)","DOI":"10.1109\/CVPR46437.2021.01101"},{"key":"36_CR13","unstructured":"Desai, K., Kaul, G., Aysola, Z., Johnson, J.: Redcaps: Web-curated image-text data created by the people, for the people. arXiv preprint arXiv:2111.11431 (2021)"},{"key":"36_CR14","unstructured":"Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: pre-training of deep bidirectional transformers for language understanding. In: ACL (2019)"},{"key":"36_CR15","unstructured":"Fang, H., Xiong, P., Xu, L., Chen, Y.: Clip2video: mastering video-text retrieval via image clip. arXiv preprint arXiv:2106.11097 (2021)"},{"key":"36_CR16","doi-asserted-by":"crossref","unstructured":"Gebru, T., et al.: Datasheets for datasets. Commun. ACM 64(12), 86\u201392 (2021)","DOI":"10.1145\/3458723"},{"key":"36_CR17","doi-asserted-by":"crossref","unstructured":"Halevy, A., et al.: Preserving integrity in online social networks. arXiv preprint arXiv:2009.10311 (2020)","DOI":"10.1145\/3394486.3409548"},{"key":"36_CR18","unstructured":"Hanu, L., Unitary team: detoxify. Github. https:\/\/github.com\/unitaryai\/detoxify (2020)"},{"key":"36_CR19","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"36_CR20","doi-asserted-by":"crossref","unstructured":"Hendricks, L.A., Wang, O., Shechtman, E., Sivic, J., Darrell, T., Russell, B.: Localizing moments in video with temporal language. In: Empirical Methods in Natural Language Processing (EMNLP) (2018)","DOI":"10.18653\/v1\/D18-1168"},{"key":"36_CR21","doi-asserted-by":"crossref","unstructured":"Huang, P.Y., Patrick, M., Hu, J., Neubig, G., Metze, F., Hauptmann, A.: Multilingual multimodal pre-training for zero-shot cross-lingual transfer of vision-language models. In: Meeting of the North American Chapter of the Association for Computational Linguistics (NAACL) (2021)","DOI":"10.18653\/v1\/2021.naacl-main.195"},{"key":"36_CR22","unstructured":"Johnson, J., Douze, M., J\u00e9gou, H.: Billion-scale similarity search with GPUS. arXiv preprint arXiv:1702.08734 (2017)"},{"key":"36_CR23","unstructured":"Kay, W., et al.: The kinetics human action video dataset. arXiv preprint arXiv:1705.06950 (2017)"},{"key":"36_CR24","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. 
In: ICLR (2015)"},{"key":"36_CR25","doi-asserted-by":"crossref","unstructured":"Krishna, R., Hata, K., Ren, F., Fei-Fei, L., Carlos Niebles, J.: Dense-captioning events in videos. In: CVPR (2017)","DOI":"10.1109\/ICCV.2017.83"},{"key":"36_CR26","doi-asserted-by":"crossref","unstructured":"Kuznetsova, A., et al.: The open images dataset v4. Int. J. Comput. Vis. 128(7), 1956\u20131981 (2020)","DOI":"10.1007\/s11263-020-01316-z"},{"key":"36_CR27","doi-asserted-by":"crossref","unstructured":"Lei, C., et al.: Understanding chinese video and language via contrastive multimodal pre-training. In: Proceedings of the 29th ACM International Conference on Multimedia, pp. 2567\u20132576 (2021)","DOI":"10.1145\/3474085.3475431"},{"key":"36_CR28","doi-asserted-by":"crossref","unstructured":"Lei, J., et al.: Less is more: ClipBERT for video-and-language learning via sparse sampling. CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.00725"},{"key":"36_CR29","unstructured":"Lewis, M., Ghazvininejad, M., Ghosh, G., Aghajanyan, A., Wang, S., Zettlemoyer, L.: Pre-training via paraphrasing. arXiv preprint arXiv:2006.15020 (2020)"},{"key":"36_CR30","doi-asserted-by":"crossref","unstructured":"Lewis, M., et al.: Bart: denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In: ACL (2020)","DOI":"10.18653\/v1\/2020.acl-main.703"},{"key":"36_CR31","doi-asserted-by":"crossref","unstructured":"Li, G., Duan, N., Fang, Y., Gong, M., Jiang, D., Zhou, M.: Unicoder-vl: a universal encoder for vision and language by cross-modal pre-training. In: AAAI (2020)","DOI":"10.1609\/aaai.v34i07.6795"},{"key":"36_CR32","unstructured":"Li, J., Li, D., Xiong, C., Hoi, S.: Blip: bootstrapping language-image pre-training for unified vision-language understanding and generation (2022)"},{"key":"36_CR33","doi-asserted-by":"crossref","unstructured":"Li, L., Chen, Y.C., Cheng, Y., Gan, Z., Yu, L., Liu, J.: Hero: Hierarchical encoder for video+ language omni-representation pre-training. EMNLP (2020)","DOI":"10.18653\/v1\/2020.emnlp-main.161"},{"key":"36_CR34","unstructured":"Li, L.H., Yatskar, M., Yin, D., Hsieh, C.J., Chang, K.W.: Visualbert: a simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557 (2019)"},{"key":"36_CR35","doi-asserted-by":"publisher","unstructured":"Lin, T.-Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"36_CR36","unstructured":"Lu, J., Batra, D., Parikh, D., Lee, S.: Vilbert: pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In: NeurIps (2019)"},{"key":"36_CR37","unstructured":"Luo, H., et al.: UniVL: a unified video and language pre-training model for multimodal understanding and generation. arXiv preprint arXiv:2002.06353 (2020)"},{"key":"36_CR38","doi-asserted-by":"crossref","unstructured":"Luo, H., et al.: Clip4clip: an empirical study of clip for end to end video clip retrieval. arXiv preprint arXiv:2104.08860 (2021)","DOI":"10.1016\/j.neucom.2022.07.028"},{"key":"36_CR39","doi-asserted-by":"crossref","unstructured":"Ma, S., Cui, L., Dai, D., Wei, F., Sun, X.: Livebot: generating live video comments based on visual and textual contexts. 
In: AAAI 2019 (2019)","DOI":"10.1609\/aaai.v33i01.33016810"},{"key":"36_CR40","doi-asserted-by":"crossref","unstructured":"Miech, A., Zhukov, D., Alayrac, J.B., Tapaswi, M., Laptev, I., Sivic, J.: Howto100m: learning a text-video embedding by watching hundred million narrated video clips. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00272"},{"key":"36_CR41","doi-asserted-by":"crossref","unstructured":"Morgado, P., Vasconcelos, N., Misra, I.: Audio-visual instance discrimination with cross-modal agreement. arXiv preprint arXiv:2004.12943 (2020)","DOI":"10.1109\/CVPR46437.2021.01229"},{"key":"36_CR42","doi-asserted-by":"crossref","unstructured":"Mu, N., Kirillov, A., Wagner, D., Xie, S.: Slip: Self-supervision meets language-image pre-training. arXiv preprint arXiv:2112.12750 (2021)","DOI":"10.1007\/978-3-031-19809-0_30"},{"key":"36_CR43","unstructured":"Patrick, M., et al.: Multi-modal self-supervision from generalized data transformations (2021)"},{"key":"36_CR44","unstructured":"Patrick, M., et al.: Support-set bottlenecks for video-text representation learning. arXiv preprint arXiv:2010.02824 (2020)"},{"key":"36_CR45","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. arXiv preprint arXiv:2103.00020 (2021)"},{"issue":"8","key":"36_CR46","first-page":"9","volume":"1","author":"A Radford","year":"2019","unstructured":"Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I.: Language models are unsupervised multitask learners. OpenAI Blog 1(8), 9 (2019)","journal-title":"OpenAI Blog"},{"key":"36_CR47","unstructured":"Raffel, C., et al.: Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683 (2019)"},{"key":"36_CR48","unstructured":"Rebuffi, S.A., Bilen, H., Vedaldi, A.: Learning multiple visual domains with residual adapters. In: NeurIPS (2017)"},{"key":"36_CR49","doi-asserted-by":"crossref","unstructured":"Rohrbach, A., et al.: Movie description. Int. J. Comput. Vis. 123 (2017)","DOI":"10.1007\/s11263-016-0987-1"},{"key":"36_CR50","doi-asserted-by":"crossref","unstructured":"Ruan, L., Jin, Q.: Survey: transformer based video-language pre-training. AI Open (2022)","DOI":"10.1016\/j.aiopen.2022.01.001"},{"key":"36_CR51","doi-asserted-by":"crossref","unstructured":"Sariyildiz, M.B., Perez, J., Larlus, D.: Learning visual representations with caption annotations. In: ECCV. pp. 153\u2013170, Springer (2020)","DOI":"10.1007\/978-3-030-58598-3_10"},{"key":"36_CR52","doi-asserted-by":"crossref","unstructured":"Sharma, P., Ding, N., Goodman, S., Soricut, R.: Conceptual captions: a cleaned, hypernymed, image alt-text dataset for automatic image captioning. In: Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2556\u20132565 (2018)","DOI":"10.18653\/v1\/P18-1238"},{"key":"36_CR53","unstructured":"Su, W., et al.: VL-BERT: pre-training of generic visual-linguistic representations. arXiv preprint arXiv:1908.08530 (2019)"},{"key":"36_CR54","unstructured":"Sun, C., Baradel, F., Murphy, K., Schmid, C.: Learning video representations using contrastive bidirectional transformer. arXiv preprint arXiv:1906.05743 (2019)"},{"key":"36_CR55","doi-asserted-by":"crossref","unstructured":"Sun, C., Myers, A., Vondrick, C., Murphy, K., Schmid, C.: Videobert: a joint model for video and language representation learning. 
In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00756"},{"key":"36_CR56","doi-asserted-by":"crossref","unstructured":"Tan, H., Bansal, M.: Lxmert: learning cross-modality encoder representations from transformers. In: EMNLP (2019)","DOI":"10.18653\/v1\/D19-1514"},{"key":"36_CR57","doi-asserted-by":"crossref","unstructured":"Thomee, B., et al.: Yfcc100m: the new data in multimedia research. Commun. ACM 59(2), 64\u201373 (2016)","DOI":"10.1145\/2812802"},{"key":"36_CR58","unstructured":"Vaswani, A., et al.: Attention is all you need. In: NeurIPS (2017)"},{"key":"36_CR59","doi-asserted-by":"crossref","unstructured":"Venugopalan, S., Rohrbach, M., Donahue, J., Mooney, R., Darrell, T., Saenko, K.: Sequence to sequence - video to text. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV) (2015)","DOI":"10.1109\/ICCV.2015.515"},{"key":"36_CR60","unstructured":"Wu, H., Jones, G.J., Pitie, F.: Response to livebot: generating live video comments based on visual and textual contexts. arXiv preprint arXiv:2006.03022 (2020)"},{"key":"36_CR61","doi-asserted-by":"crossref","unstructured":"Xu, J., Mei, T., Yao, T., Rui, Y.: MSR-VTT: a large video description dataset for bridging video and language. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.571"},{"key":"36_CR62","unstructured":"Yao, L., et al.: FILIP: fine-grained interactive language-image pre-training. In: International Conference on Learning Representations (2022)"},{"key":"36_CR63","doi-asserted-by":"crossref","unstructured":"l Zhu, L., Yang, Y.: Actbert: learning global-local video-text representations. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.00877"}],"updated-by":[{"DOI":"10.1007\/978-3-031-19833-5_43","type":"correction","label":"Correction","source":"publisher","updated":{"date-parts":[[2023,1,10]],"date-time":"2023-01-10T00:00:00Z","timestamp":1673308800000}}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-19833-5_36","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2023,1,9]],"date-time":"2023-01-09T10:46:09Z","timestamp":1673261169000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-19833-5_36"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031198328","9783031198335"],"references-count":63,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-19833-5_36","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"4 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"10 January 2023","order":2,"name":"change_date","label":"Change Date","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"Correction","order":3,"name":"change_type","label":"Change Type","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"In the originally published version of chapter 36, the place and country of the first affiliation, Unitary Ltd., erroneously showed \u201cMoscow, Russia\u201d instead of  \u201cLondon, UK\u201d. 
This has been corrected.","order":4,"name":"change_details","label":"Change Details","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference 
organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"From the workshops, 367 reviewed full papers have been selected for publication","order":10,"name":"additional_info_on_review_process","label":"Additional Info on Review Process","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}