{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,13]],"date-time":"2026-04-13T23:17:27Z","timestamp":1776122247044,"version":"3.50.1"},"reference-count":125,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2025,2,1]],"date-time":"2025-02-01T00:00:00Z","timestamp":1738368000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,2,1]],"date-time":"2025-02-01T00:00:00Z","timestamp":1738368000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,2,1]],"date-time":"2025-02-01T00:00:00Z","timestamp":1738368000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U21B2043"],"award-info":[{"award-number":["U21B2043"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62102416"],"award-info":[{"award-number":["62102416"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Key Research and Development Program of Jiangsu Province","award":["BE2023016-3"],"award-info":[{"award-number":["BE2023016-3"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2025,2]]},"DOI":"10.1109\/tpami.2024.3479776","type":"journal-article","created":{"date-parts":[[2024,10,17]],"date-time":"2024-10-17T17:35:56Z","timestamp":1729186556000},"page":"708-724","source":"Crossref","is-referenced-by-count":23,"title":["VALOR: Vision-Audio-Language Omni-Perception Pretraining Model and Dataset"],"prefix":"10.1109","volume":"47","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0903-9131","authenticated-orcid":false,"given":"Jing","family":"Liu","sequence":"first","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0001-3539-8085","authenticated-orcid":false,"given":"Sihan","family":"Chen","sequence":"additional","affiliation":[{"name":"School of Artificial Intelligence, University of Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-5396-6253","authenticated-orcid":false,"given":"Xingjian","family":"He","sequence":"additional","affiliation":[{"name":"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4340-4000","authenticated-orcid":false,"given":"Longteng","family":"Guo","sequence":"additional","affiliation":[{"name":"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2142-5580","authenticated-orcid":false,"given":"Xinxin","family":"Zhu","sequence":"additional","affiliation":[{"name":"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, 
China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7299-6431","authenticated-orcid":false,"given":"Weining","family":"Wang","sequence":"additional","affiliation":[{"name":"National Laboratory of Pattern Recognition, Institute of Automation, Chinese Academy of Sciences, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9008-222X","authenticated-orcid":false,"given":"Jinhui","family":"Tang","sequence":"additional","affiliation":[{"name":"School of Computer Science and Engineering, Nanjing University of Science and Technology, Nanjing, China"}]}],"member":"263","reference":[{"key":"ref1","first-page":"1571","article-title":"Bilinear attention networks","volume-title":"Proc. 32nd Int. Conf. Adv. Neural Inf. Process. Syst.","volume":"31","author":"Kim","year":"2018"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3059295"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2021.3132229"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2946823"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00273"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00636"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2020.3004830"},{"key":"ref8","first-page":"2","article-title":"BERT: Pre-training of deep bidirectional transformers for language understanding","volume":"1","author":"Kenton","year":"2019","journal-title":"Proc. NaacL-HLT"},{"key":"ref9","article-title":"Improving language understanding by generative pre-training","author":"Radford","year":"2018"},{"key":"ref10","first-page":"1877","article-title":"Language models are few-shot learners","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"33","author":"Brown","year":"2020"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/iccv.2019.00272"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00498"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref14","article-title":"GIT: A generative image-to-text transformer for vision and language","author":"Wang","year":"2022","journal-title":"Trans. Mach. Learn. Res."},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01589"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00990"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr46437.2021.00356"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01743"},{"key":"ref20","first-page":"1","article-title":"CLIP-ViP: Adapting pre-trained image-text model to video-language alignment","volume-title":"Proc. 11th Int. Conf. Learn. Representations","author":"Xue","year":"2022"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"ref22","first-page":"13","article-title":"ViLBERT: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"32","author":"Lu","year":"2019"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02193"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2020.emnlp-main.161"},{"key":"ref25","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"34","author":"Li","year":"2021"},{"key":"ref26","first-page":"91","article-title":"Faster R-CNN: Towards real-time object detection with region proposal networks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"28","author":"Ren","year":"2015"},{"key":"ref27","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","author":"Dosovitskiy","year":"2020"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"ref29","first-page":"1931","article-title":"Unifying vision-and-language tasks via text generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Cho"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01630"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.02214"},{"key":"ref32","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford","year":"2021"},{"key":"ref33","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jia","year":"2021"},{"key":"ref34","article-title":"Florence: A new foundation model for computer vision","author":"Yuan","year":"2021"},{"key":"ref35","article-title":"Combined scaling for open-vocabulary image classification","author":"Pham","year":"2021"},{"key":"ref36","first-page":"1","article-title":"SIMVLM: Simple visual language model pretraining with weak supervision","author":"Wang","year":"2021","journal-title":"Proc. Int. Conf. Learn. Representations"},{"key":"ref37","first-page":"23716","article-title":"Flamingo: A visual language model for few-shot learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Alayrac","year":"2022"},{"key":"ref38","first-page":"1","article-title":"Pali: A jointly-scaled multilingual language-image model","author":"Chen","year":"2022","journal-title":"Proc. 11th Int. Conf. Learn. Representations"},{"key":"ref39","article-title":"COCA: Contrastive captioners are image-text foundation models","author":"Yu","year":"2022","journal-title":"Proc. Trans. Mach. Learn. Res."},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58548-8_13"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1145\/3474085.3479216"},{"key":"ref42","article-title":"UniVL: A unified video and language pre-training model for multimodal understanding and generation","author":"Luo","year":"2020"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-acl.370"},{"key":"ref44","first-page":"1","article-title":"Value: A multi-task benchmark for video-and-language understanding evaluation","author":"Li","year":"2021","journal-title":"Proc. 35th Conf. Neural Inf. Process. Syst. Datasets Benchmarks Track (Round 1)"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-1312"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00791"},{"key":"ref47","first-page":"24206","article-title":"VATT: Transformers for multimodal self-supervised learning from raw video, audio and text","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"34","author":"Akbari","year":"2021"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01939"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v37i9.26290"},{"key":"ref50","first-page":"190","article-title":"Collecting highly parallel data for paraphrase evaluation","volume-title":"Proc. 49th Annu. Meeting Assoc. Comput. Linguistics: Hum. Lang. Technol.","author":"Chen","year":"2011"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2019.00468"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v32i1.12342"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.618"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2017.83"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0987-1"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9052990"},{"key":"ref58","first-page":"119","article-title":"Audiocaps: Generating captions for audios in the wild","volume-title":"Proc. 2019 Conf. North Amer. Chapter Assoc. Comput. Linguistics: Hum. Lang. Technol.","author":"Kim","year":"2019"},{"key":"ref59","first-page":"2031","article-title":"Pano-AVQA: Grounded audio-visual question answering on 360$^\\circ$\u2218 videos","volume-title":"Proc. IEEE\/CVF Int. Conf. Comput. Vis.","author":"Yun","year":"2021"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01852"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548291"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2017.7952261"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00320"},{"key":"ref64","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-698"},{"key":"ref65","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v36i10.21315"},{"key":"ref66","article-title":"Improving video-text retrieval by multi-stream corpus alignment and dual softmax loss","author":"Cheng","year":"2021"},{"key":"ref67","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00725"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01569"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_40"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00331"},{"key":"ref71","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19781-9_24"},{"key":"ref72","first-page":"38032","article-title":"Long-form video-language pre-training with multimodal temporal contrastive learning","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Sun","year":"2022"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.29"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01136"},{"key":"ref75","first-page":"1","article-title":"Support-set bottlenecks for video-text representation learning","author":"Patrick","year":"2020","journal-title":"Proc. Int. Conf. Learn. 
Representations"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.1109\/WACV51458.2022.00217"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00638"},{"key":"ref78","article-title":"VIOLET: End-to-end video-language transformers with masked visual-token modeling","author":"Fu","year":"2021"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2022.07.028"},{"key":"ref80","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19781-9_19"},{"key":"ref81","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3547910"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19830-4_24"},{"key":"ref83","article-title":"Disentangled representation learning for text-video retrieval","author":"Wang","year":"2022"},{"key":"ref84","article-title":"Hunyuan_tvr for text-video retrivial","author":"Min","year":"2022"},{"key":"ref85","article-title":"InternVideo: General video foundation models via generative and discriminative learning","author":"Wang","year":"2022"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref88","first-page":"1143","article-title":"Im2Text: Describing images using 1 million captioned photographs","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"24","author":"Ordonez","year":"2011"},{"key":"ref89","first-page":"311","article-title":"BLEU: A method for automatic evaluation of machine translation","volume-title":"Proc. 40th Annu. Meeting Assoc. Comput. Linguistics","author":"Papineni","year":"2002"},{"key":"ref90","first-page":"65","article-title":"METEOR: An automatic metric for MT evaluation with improved correlation with human judgments","volume-title":"Proc. ACL Workshop Intrinsic Extrinsic Eval. Measures Mach. Transl. Summarization","author":"Banerjee","year":"2005"},{"key":"ref91","first-page":"65","article-title":"An automatic metric for MT evaluation with improved correlation with human judgments","volume-title":"Proc. ACL-2005 Workshop Intrinsic Extrinsic Eval. Measures MT Summarization","author":"Banerjee","year":"2005"},{"key":"ref92","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref93","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-46454-1_24"},{"key":"ref94","doi-asserted-by":"publisher","DOI":"10.1145\/3123266.3123427"},{"key":"ref95","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v33i01.33019127"},{"key":"ref96","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.149"},{"key":"ref97","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref98","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.131"},{"key":"ref99","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01329"},{"key":"ref100","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00971"},{"key":"ref101","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01742"},{"key":"ref102","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i07.6766"},{"key":"ref103","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01427"},{"key":"ref104","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00171"},{"key":"ref105","first-page":"23634","article-title":"MERLOT: Multimodal neural script knowledge models","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"34","author":"Zellers","year":"2021"},{"key":"ref106","first-page":"124","article-title":"Zero-shot video question answering via frozen bidirectional language models","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","volume":"35","author":"Yang","year":"2022"},{"key":"ref107","article-title":"Video-text modeling with zero-shot transfer from contrastive captioners","author":"Yan","year":"2022"},{"key":"ref108","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2021-2227"},{"key":"ref109","first-page":"225","article-title":"A CRNN-GRU based reinforcement learning approach to audio captioning","volume-title":"Proc. Detection Classification Acoust. Scenes Events","author":"Xu","year":"2020"},{"key":"ref110","first-page":"21","article-title":"Audio captioning based on transformer and pre-trained CNN","volume-title":"Proc. Detection Classification Acoust. Scenes Events","author":"Chen","year":"2020"},{"key":"ref111","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP39728.2021.9413982"},{"key":"ref112","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP43922.2022.9747676"},{"key":"ref113","article-title":"Audio captioning transformer","author":"Mei","year":"2021"},{"key":"ref114","doi-asserted-by":"publisher","DOI":"10.23919\/EUSIPCO55093.2022.9909761"},{"key":"ref115","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-20053-3_5"},{"key":"ref116","first-page":"1","article-title":"DeBERTa: Decoding-enhanced bert with disentangled attention","author":"He","year":"2020","journal-title":"Proc. Int. Conf. Learn. Representations"},{"key":"ref117","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"ref118","doi-asserted-by":"publisher","DOI":"10.32657\/10356\/169546"},{"key":"ref119","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00553"},{"key":"ref120","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01763"},{"key":"ref121","first-page":"1","article-title":"Filip: Fine-grained interactive language-image pre-training","author":"Yao","year":"2021","journal-title":"Proc. Int. Conf. Learn. Representations"},{"key":"ref122","first-page":"12888","article-title":"Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li","year":"2022"},{"key":"ref123","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52688.2022.01745"},{"key":"ref124","first-page":"1","article-title":"Contrastive audio-visual masked autoencoder","author":"Gong","year":"2022","journal-title":"Proc. 11th Int. Conf. Learn. Representations"},{"key":"ref125","first-page":"9617","article-title":"TVLT: Textless vision-language transformer","volume-title":"Proc. Adv. Neural Inf. Process. 
Syst.","volume":"35","author":"Tang","year":"2022"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/10835210\/10721284.pdf?arnumber=10721284","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,10]],"date-time":"2025-01-10T05:44:56Z","timestamp":1736487896000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10721284\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2]]},"references-count":125,"journal-issue":{"issue":"2"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2024.3479776","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,2]]}}}