{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,25]],"date-time":"2026-03-25T14:39:13Z","timestamp":1774449553559,"version":"3.50.1"},"reference-count":63,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"12","license":[{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2025,12,1]],"date-time":"2025-12-01T00:00:00Z","timestamp":1764547200000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"funder":[{"name":"Shenzhen Science and Technology Program","award":["JCYJ20220818101014030"],"award-info":[{"award-number":["JCYJ20220818101014030"]}]},{"name":"Tsinghua University - Tencent Joint Laboratory for Internet Innovation Technology"},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62402043"],"award-info":[{"award-number":["62402043"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62172039"],"award-info":[{"award-number":["62172039"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["U21B2009"],"award-info":[{"award-number":["U21B2009"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62276110"],"award-info":[{"award-number":["62276110"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Pattern Anal. Mach. Intell."],"published-print":{"date-parts":[[2025,12]]},"DOI":"10.1109\/tpami.2025.3596394","type":"journal-article","created":{"date-parts":[[2025,8,6]],"date-time":"2025-08-06T17:59:54Z","timestamp":1754503194000},"page":"11065-11079","source":"Crossref","is-referenced-by-count":3,"title":["Global and Local Semantic Completion Learning for Vision-Language Pre-Training"],"prefix":"10.1109","volume":"47","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-9567-159X","authenticated-orcid":false,"given":"Rong-Cheng","family":"Tu","sequence":"first","affiliation":[{"name":"College of Computing and Data Science, Nanyang Technological University, Singapore"}]},{"given":"Yatai","family":"Ji","sequence":"additional","affiliation":[{"name":"Shenzhen International Graduate School, Tsinghua University, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-9658-5127","authenticated-orcid":false,"given":"Jie","family":"Jiang","sequence":"additional","affiliation":[{"name":"Tencent Hunyuan team, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-1700-4801","authenticated-orcid":false,"given":"Weijie","family":"Kong","sequence":"additional","affiliation":[{"name":"Tencent Hunyuan team, Shenzhen, China"}]},{"given":"Chengfei","family":"Cai","sequence":"additional","affiliation":[{"name":"Tencent Hunyuan team, Shenzhen, China"}]},{"given":"Wenzhe","family":"Zhao","sequence":"additional","affiliation":[{"name":"Tencent Hunyuan team, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0001-8230-9471","authenticated-orcid":false,"given":"Hongfa","family":"Wang","sequence":"additional","affiliation":[{"name":"Tencent Hunyuan team, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6427-1024","authenticated-orcid":false,"given":"Yujiu","family":"Yang","sequence":"additional","affiliation":[{"name":"Shenzhen International Graduate School, Tsinghua University, Shenzhen, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-3865-8145","authenticated-orcid":false,"given":"Wei","family":"Liu","sequence":"additional","affiliation":[{"name":"Tencent Hunyuan team, Shenzhen, China"}]}],"member":"263","reference":[{"key":"ref1","article-title":"Improving video-text retrieval by multi-stream corpus alignment and dual softmax loss","author":"Cheng","year":"2021"},{"key":"ref2","article-title":"Unified-IO: A unified model for vision, language, and multi-modal tasks","author":"Lu","year":"2022"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/D19-1514"},{"key":"ref4","first-page":"13","article-title":"VilBERT: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Lu"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.202"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01569"},{"key":"ref7","article-title":"MAP: Modality-agnostic uncertainty-aware vision-language pre-training model","author":"Ji","year":"2022"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_8"},{"key":"ref9","article-title":"VIOLET : End-to-end video-language transformers with masked visual-token modeling","author":"Fu","year":"2021"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.00175"},{"key":"ref11","article-title":"VLMAE: Vision-language masked autoencoder","author":"He","year":"2022"},{"key":"ref12","article-title":"Egocentric video-language pretraining","author":"Lin","year":"2022"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/n19-1423"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1145\/3503161.3548320"},{"key":"ref15","article-title":"VL-BEiT: Generative vision-language pretraining","author":"Bao","year":"2022"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.01838"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.02193"},{"key":"ref18","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Radford"},{"key":"ref19","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Jia"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58548-8_13"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.emnlp-main.544"},{"key":"ref22","first-page":"24206","article-title":"VATT: Transformers for multimodal self-supervised learning from raw video, audio and text","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Akbari"},{"key":"ref23","article-title":"Hierarchical text-conditional image generation with CLIP latents","author":"Ramesh","year":"2022"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01139"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1145\/3652583.3658044"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01594"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01524"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-19833-5_40"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01763"},{"key":"ref31","first-page":"9694","article-title":"Align before fuse: Vision and language representation learning with momentum distillation","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Li"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01522"},{"key":"ref33","first-page":"5583","article-title":"ViLT: Vision-and-language transformer without convolution or region supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Kim"},{"key":"ref34","article-title":"VLMo: Unified vision-language pre-training with mixture-of-modality-experts","author":"Wang","year":"2021"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1109\/cvpr52729.2023.01427"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00490"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1177\/107769905303000401"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.findings-emnlp.196"},{"key":"ref39","article-title":"Masked vision and language modeling for multi-modal representation learning","author":"Kwon","year":"2022"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01426"},{"key":"ref41","article-title":"An image is worth 16x16 words: Transformers for image recognition at scale","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Dosovitskiy"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0965-7"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/iccv.2017.201"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/d14-1086"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-10602-1_48"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-016-0981-7"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P18-1238"},{"key":"ref49","first-page":"1143","article-title":"Im2Text: Describing images using 1 million captioned photographs","volume-title":"Proc. Int. Conf. Neural Inf. Process. Syst.","author":"Ordonez"},{"key":"ref50","article-title":"RoBERTa: A robustly optimized BERT pretraining approach","author":"Liu","year":"2019"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW50498.2020.00359"},{"key":"ref52","article-title":"SimVLM: Simple visual language model pretraining with weak supervision","volume-title":"Proc. Int. Conf. Learn. Representations","author":"Wang"},{"key":"ref53","first-page":"23318","article-title":"OFA: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Wang"},{"key":"ref54","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref55","article-title":"Pixel-BERT: Aligning image pixels with text by deep multi-modal transformers","author":"Huang","year":"2020"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00553"},{"key":"ref57","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.01278"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.670"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/P19-1644"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.303"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.571"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298940"},{"key":"ref63","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2021.acl-long.42"}],"container-title":["IEEE Transactions on Pattern Analysis and Machine Intelligence"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/34\/11230086\/11117175.pdf?arnumber=11117175","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,11,6]],"date-time":"2025-11-06T05:47:46Z","timestamp":1762408066000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/11117175\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12]]},"references-count":63,"journal-issue":{"issue":"12"},"URL":"https:\/\/doi.org\/10.1109\/tpami.2025.3596394","relation":{},"ISSN":["0162-8828","2160-9292","1939-3539"],"issn-type":[{"value":"0162-8828","type":"print"},{"value":"2160-9292","type":"electronic"},{"value":"1939-3539","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,12]]}}}