{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,11]],"date-time":"2026-02-11T13:42:33Z","timestamp":1770817353653,"version":"3.50.1"},"reference-count":65,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"9","license":[{"start":{"date-parts":[[2025,9,1]],"date-time":"2025-09-01T00:00:00Z","timestamp":1756684800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/legalcode"}],"funder":[{"name":"Beijing Natural Science Foundation","award":["JQ23016"],"award-info":[{"award-number":["JQ23016"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62476273"],"award-info":[{"award-number":["62476273"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Science and Technology Development Fund of Macau Project","award":["0084\/2024\/RIB2"],"award-info":[{"award-number":["0084\/2024\/RIB2"]}]},{"name":"Science and Technology Development Fund of Macau Project","award":["0044\/2024\/AGJ"],"award-info":[{"award-number":["0044\/2024\/AGJ"]}]},{"name":"Science and Technology Development Fund of Macau Project","award":["0123\/2022\/A3"],"award-info":[{"award-number":["0123\/2022\/A3"]}]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Circuits Syst. Video Technol."],"published-print":{"date-parts":[[2025,9]]},"DOI":"10.1109\/tcsvt.2025.3553052","type":"journal-article","created":{"date-parts":[[2025,3,19]],"date-time":"2025-03-19T16:01:32Z","timestamp":1742400092000},"page":"8533-8544","source":"Crossref","is-referenced-by-count":4,"title":["C<sup>2<\/sup>RL: Content and Context Representation Learning for Gloss-Free Sign Language Translation and Retrieval"],"prefix":"10.1109","volume":"35","author":[{"ORCID":"https:\/\/orcid.org\/0009-0001-6139-3755","authenticated-orcid":false,"given":"Zhigang","family":"Chen","sequence":"first","affiliation":[{"name":"State Key Laboratory of Multimodal Artificial Intelligence Systems (MAIS), Institute of Automation, Chinese Academy of Sciences (CASIA), Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4883-5552","authenticated-orcid":false,"given":"Benjia","family":"Zhou","sequence":"additional","affiliation":[{"name":"Faculty of Innovation Engineering, School of Computer Science and Engineering, Macau University of Science and Technology, Macau, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0003-8722-2094","authenticated-orcid":false,"given":"Yiqing","family":"Huang","sequence":"additional","affiliation":[{"name":"Faculty of Innovation Engineering, School of Computer Science and Engineering, Macau University of Science and Technology, Macau, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4735-2885","authenticated-orcid":false,"given":"Jun","family":"Wan","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Multimodal Artificial Intelligence Systems (MAIS), Institute of Automation, Chinese Academy of Sciences (CASIA), Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0009-0006-7762-6986","authenticated-orcid":false,"given":"Yibo","family":"Hu","sequence":"additional","affiliation":[{"name":"NIO, Digital Cockpit and Software Development, Beijing, China"}]},{"given":"Hailin","family":"Shi","sequence":"additional","affiliation":[{"name":"NIO, Digital Cockpit and Software Development, Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5780-8540","authenticated-orcid":false,"given":"Yanyan","family":"Liang","sequence":"additional","affiliation":[{"name":"Faculty of Innovation Engineering, School of Computer Science and Engineering, Macau University of Science and Technology, Macau, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0791-189X","authenticated-orcid":false,"given":"Zhen","family":"Lei","sequence":"additional","affiliation":[{"name":"State Key Laboratory of Multimodal Artificial Intelligence Systems (MAIS), Institute of Automation, Chinese Academy of Sciences (CASIA), Beijing, China"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-8301-2706","authenticated-orcid":false,"given":"Du","family":"Zhang","sequence":"additional","affiliation":[{"name":"Faculty of Innovation Engineering, School of Computer Science and Engineering, Macau University of Science and Technology, Macau, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.01004"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.01370"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.01823"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00506"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3059098"},{"key":"ref6","first-page":"1","article-title":"Two-stream network for sign language recognition and translation","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Chen"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV51070.2023.01908"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52729.2023.00251"},{"key":"ref9","first-page":"12034","article-title":"TSPNet: Hierarchical feature learning via temporal semantic pyramid for sign language translation","volume-title":"Proc. Annu. Conf. Neural Inf. Process. Syst. (NeurIPS)","author":"Li"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2021.3087006"},{"key":"ref11","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume-title":"Proc. Int. Conf. Mach. Learn.","volume":"139","author":"Radford"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2023.acl-long.722"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00276"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.18653\/v1\/2022.emnlp-main.427"},{"key":"ref15","first-page":"1","article-title":"CoCa: Contrastive captioners are image-text foundation models","volume-title":"Proc. Trans. Mach. Learn. Res.","author":"Yu"},{"key":"ref16","first-page":"12888","article-title":"BLIP: Bootstrapping language-image pre-training for unified vision-language understanding and generation","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Li"},{"key":"ref17","first-page":"1","article-title":"MS-ASL: A large-scale data set and benchmark for understanding American sign language","volume-title":"Proc. Brit. Mach. Vis. Conf.","author":"Joze"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2018.2870740"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR42600.2020.00624"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2911077"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2020.2999384"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3296668"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52688.2022.00507"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00812"},{"key":"ref25","first-page":"11","article-title":"Large lexicon project: American sign language video corpus and sign language indexing\/retrieval algorithms","volume-title":"Proc. Eur. Language Resour. Assoc. (ELRA)","author":"Athitsos"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3409728"},{"key":"ref27","article-title":"CorrNet+: Sign language recognition and translation via spatial\u2013temporal correlation","author":"Hu","year":"2024","journal-title":"arXiv:2404.11111"},{"key":"ref28","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00137"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v38i17.29937"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v39i6.32612"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3376404"},{"key":"ref32","first-page":"1","article-title":"Sign2GPT: Leveraging large language models for gloss-free sign language translation","volume-title":"Proc. 12th Int. Conf. Learn. Represent.","author":"Wong"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR52733.2024.01738"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72946-1_20"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-031-72784-9_22"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1145\/3664647.3681237"},{"key":"ref37","article-title":"BLIP-2: Bootstrapping language-image pre-training with frozen image encoders and large language models","author":"Li","year":"2023","journal-title":"arXiv:2301.12597"},{"key":"ref38","first-page":"1","article-title":"ViLBERT: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks","volume-title":"Proc. Adv. Neural Inf. Process. Syst.","author":"Lu"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-030-58577-8_7"},{"key":"ref40","first-page":"4904","article-title":"Scaling up visual and vision-language representation learning with noisy text supervision","volume-title":"Proc. 38th Int. Conf. Mach. Learn.","volume":"139","author":"Jia"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3178844"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3177320"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2022.3165934"},{"key":"ref44","article-title":"VisualBERT: A simple and performant baseline for vision and language","author":"Harold Li","year":"2019","journal-title":"arXiv:1908.03557"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2024.3391304"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.1109\/TCSVT.2023.3284474"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1162\/tacl_a_00343"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref49","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.48550\/ARXIV.1706.03762"},{"key":"ref51","first-page":"297","article-title":"Noise-contrastive estimation: A new estimation principle for unnormalized statistical models","volume-title":"Proc. 13th Int. Conf. Artif. Intell. Statist.","author":"Gutmann"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.3115\/1073083.1073135"},{"key":"ref53","first-page":"74","article-title":"ROUGE: A package for automatic evaluation of summaries","volume-title":"Proc. Text Summarization Branches Out","author":"Lin"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1214\/aoms\/1177729586"},{"key":"ref55","article-title":"SGDR: Stochastic gradient descent with warm restarts","author":"Loshchilov","year":"2016","journal-title":"arXiv:1608.03983"},{"key":"ref56","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014","journal-title":"arXiv:1412.6980"},{"key":"ref57","first-page":"1","article-title":"SLTUNET: A simple unified model for sign language translation","volume-title":"Proc. 11th Int. Conf. Learn. Represent.","author":"Zhang"},{"key":"ref58","article-title":"YouTube-ASL: A large-scale, open-domain American sign language-english parallel corpus","author":"Uthus","year":"2023","journal-title":"arXiv:2306.15162"},{"key":"ref59","article-title":"Sign language translation based on transformers for the how2sign dataset","volume-title":"Image Processing Group Signal Theory and Communications","author":"Alvarez","year":"2022"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW59228.2023.00596"},{"key":"ref61","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV48922.2021.01111"},{"issue":"107","key":"ref62","first-page":"1","article-title":"Beyond english-centric multilingual machine translation","volume":"22","author":"Fan","year":"2021","journal-title":"J. Mach. Learn. Res."},{"key":"ref63","first-page":"483","article-title":"MT5: A massively multilingual pre-trained text-to-text transformer","volume-title":"Proc. Conf. North Amer. Chapter Assoc. Comput. Linguistics, Hum. Lang. Technol.","author":"Xue"},{"key":"ref64","article-title":"Multilingual translation with extensible multilingual pretraining and finetuning","author":"Tang","year":"2020","journal-title":"arXiv:2008.00401"},{"key":"ref65","first-page":"1","article-title":"LoRA: Low-rank adaptation of large language models","volume-title":"Proc. Int. Conf. Learn. Represent.","author":"Hu"}],"container-title":["IEEE Transactions on Circuits and Systems for Video Technology"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/76\/11154820\/10933970.pdf?arnumber=10933970","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,10]],"date-time":"2025-09-10T17:49:15Z","timestamp":1757526555000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10933970\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9]]},"references-count":65,"journal-issue":{"issue":"9"},"URL":"https:\/\/doi.org\/10.1109\/tcsvt.2025.3553052","relation":{},"ISSN":["1051-8215","1558-2205"],"issn-type":[{"value":"1051-8215","type":"print"},{"value":"1558-2205","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9]]}}}