{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,6]],"date-time":"2026-05-06T16:00:36Z","timestamp":1778083236853,"version":"3.51.4"},"publisher-location":"Cham","reference-count":85,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031729669","type":"print"},{"value":"9783031729676","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,3]],"date-time":"2024-11-03T00:00:00Z","timestamp":1730592000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-72967-6_3","type":"book-chapter","created":{"date-parts":[[2024,11,2]],"date-time":"2024-11-02T19:03:19Z","timestamp":1730574199000},"page":"36-54","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":9,"title":["A Simple Baseline for\u00a0Spoken Language to\u00a0Sign Language Translation with\u00a03D 
Avatars"],"prefix":"10.1007","author":[{"given":"Ronglai","family":"Zuo","sequence":"first","affiliation":[]},{"given":"Fangyun","family":"Wei","sequence":"additional","affiliation":[]},{"given":"Zenggui","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Brian","family":"Mak","sequence":"additional","affiliation":[]},{"given":"Jiaolong","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Xin","family":"Tong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,3]]},"reference":[{"key":"3_CR1","doi-asserted-by":"crossref","unstructured":"Albanie, S., et al.: BSL-1K: scaling up co-articulated sign language recognition using mouthing cues. In: ECCV, pp. 35\u201353 (2020)","DOI":"10.1007\/978-3-030-58621-8_3"},{"key":"3_CR2","doi-asserted-by":"crossref","unstructured":"Bao, J., Chen, D., Wen, F., Li, H., Hua, G.: CVAE-GAN: fine-grained image generation through asymmetric training. In: ICCV, pp. 2745\u20132754 (2017)","DOI":"10.1109\/ICCV.2017.299"},{"key":"3_CR3","unstructured":"Cai, Z., et al.: Smpler-x: scaling up expressive human pose and shape estimation. In: NeurIPS (2023)"},{"key":"3_CR4","doi-asserted-by":"crossref","unstructured":"Camgoz, N.C., Hadfield, S., Koller, O., Ney, H., Bowden, R.: Neural sign language translation. In: CVPR, pp. 7784\u20137793 (2018)","DOI":"10.1109\/CVPR.2018.00812"},{"key":"3_CR5","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"301","DOI":"10.1007\/978-3-030-66823-5_18","volume-title":"Computer Vision \u2013 ECCV 2020 Workshops","author":"NC Camgoz","year":"2020","unstructured":"Camgoz, N.C., Koller, O., Hadfield, S., Bowden, R.: Multi-channel transformers for multi-articulatory sign language translation. In: Bartoli, A., Fusiello, A. (eds.) ECCV 2020. LNCS, vol. 12538, pp. 301\u2013319. Springer, Cham (2020). 
https:\/\/doi.org\/10.1007\/978-3-030-66823-5_18"},{"key":"3_CR6","doi-asserted-by":"crossref","unstructured":"Camg\u00f6z, N.C., Koller, O., Hadfield, S., Bowden, R.: Sign language transformers: joint end-to-end sign language recognition and translation. In: CVPR, pp. 10020\u201310030 (2020)","DOI":"10.1109\/CVPR42600.2020.01004"},{"key":"3_CR7","unstructured":"Chen, T., Kornblith, S., Norouzi, M., Hinton, G.: A simple framework for contrastive learning of visual representations. In: ICML, pp. 1597\u20131607. PMLR (2020)"},{"key":"3_CR8","doi-asserted-by":"crossref","unstructured":"Chen, Y., Wei, F., Sun, X., Wu, Z., Lin, S.: A simple multi-modality transfer learning baseline for sign language translation. In: CVPR, pp. 5120\u20135130 (2022)","DOI":"10.1109\/CVPR52688.2022.00506"},{"key":"3_CR9","unstructured":"Chen, Y., Zuo, R., Wei, F., Wu, Y., Liu, S., Mak, B.: Two-stream network for sign language recognition and translation. In: NeurIPS (2022)"},{"key":"3_CR10","doi-asserted-by":"crossref","unstructured":"Cheng, K.L., Yang, Z., Chen, Q., Tai, Y.: Fully convolutional networks for continuous sign language recognition. In: ECCV, vol. 12369, pp. 697\u2013714 (2020)","DOI":"10.1007\/978-3-030-58586-0_41"},{"key":"3_CR11","doi-asserted-by":"crossref","unstructured":"Cheng, Y., Wei, F., Bao, J., Chen, D., Zhang, W.: CiCo: domain-aware sign language retrieval via cross-lingual contrastive learning. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01823"},{"key":"3_CR12","unstructured":"Blender - a 3D modelling and rendering package. Blender Foundation, Stichting Blender Foundation, Amsterdam (2018). http:\/\/www.blender.org"},{"key":"3_CR13","doi-asserted-by":"crossref","unstructured":"Cui, R., Liu, H., Zhang, C.: A deep neural framework for continuous sign language recognition by iterative training. 
IEEE TMM (2019)","DOI":"10.1109\/TMM.2018.2889563"},{"key":"3_CR14","doi-asserted-by":"crossref","unstructured":"Duarte, A., Albanie, S., Gir\u00f3-i Nieto, X., Varol, G.: Sign language video retrieval with free-form textual queries. In: CVPR, pp. 14094\u201314104 (2022)","DOI":"10.1109\/CVPR52688.2022.01370"},{"key":"3_CR15","doi-asserted-by":"crossref","unstructured":"Duarte, A., et al.: How2sign: a large-scale multimodal dataset for continuous American sign language. In: CVPR, pp. 2735\u20132744 (2021)","DOI":"10.1109\/CVPR46437.2021.00276"},{"key":"3_CR16","unstructured":"Fang, S., Sui, C., Zhang, X., Tian, Y.: Signdiff: learning diffusion models for American sign language production (2023)"},{"key":"3_CR17","doi-asserted-by":"crossref","unstructured":"Forte, M.P., et al.: Reconstructing signing avatars from video using linguistic priors. In: CVPR, pp. 12791\u201312801 (2023)","DOI":"10.1109\/CVPR52729.2023.01230"},{"key":"3_CR18","doi-asserted-by":"crossref","unstructured":"Gan, S., Yin, Y., Jiang, Z., Xia, K., Xie, L., Lu, S.: Contrastive learning for sign language recognition and translation. In: IJCAI, pp. 763\u2013772 (2023)","DOI":"10.24963\/ijcai.2023\/85"},{"key":"3_CR19","first-page":"5","volume":"4","author":"S Geman","year":"1987","unstructured":"Geman, S.: Statistical methods for tomographic image reconstruction. Bull. Int. Stat. Inst. 4, 5\u201321 (1987)","journal-title":"Bull. Int. Stat. Inst."},{"key":"3_CR20","doi-asserted-by":"crossref","unstructured":"Graves, A., Fern\u00e1ndez, S., Gomez, F., Schmidhuber, J.: Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks. In: ICML, pp. 369\u2013376 (2006)","DOI":"10.1145\/1143844.1143891"},{"key":"3_CR21","doi-asserted-by":"crossref","unstructured":"Hao, A., Min, Y., Chen, X.: Self-mutual distillation learning for continuous sign language recognition. In: ICCV, pp. 
11303\u201311312 (2021)","DOI":"10.1109\/ICCV48922.2021.01111"},{"key":"3_CR22","doi-asserted-by":"crossref","unstructured":"Hu, H., Zhao, W., Zhou, W., Li, H.: Signbert+: hand-model-aware self-supervised pre-training for sign language understanding. IEEE TPAMI (2023)","DOI":"10.1109\/TPAMI.2023.3269220"},{"key":"3_CR23","doi-asserted-by":"crossref","unstructured":"Hu, H., Zhou, W., Li, H.: Hand-model-aware sign language recognition. In: AAAI, vol. 35, pp. 1558\u20131566 (2021)","DOI":"10.1609\/aaai.v35i2.16247"},{"key":"3_CR24","doi-asserted-by":"crossref","unstructured":"Hu, L., Gao, L., Feng, W., et\u00a0al.: Self-emphasizing network for continuous sign language recognition. In: AAAI (2023)","DOI":"10.1609\/aaai.v37i1.25164"},{"key":"3_CR25","doi-asserted-by":"crossref","unstructured":"Hu, L., Gao, L., Liu, Z., Feng, W.: Continuous sign language recognition with correlation network. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.00249"},{"key":"3_CR26","doi-asserted-by":"crossref","unstructured":"Huang, W., Pan, W., Zhao, Z., Tian, Q.: Towards fast and high-quality sign language production. In: MM, pp. 3172\u20133181 (2021)","DOI":"10.1145\/3474085.3475463"},{"key":"3_CR27","doi-asserted-by":"crossref","unstructured":"Huang, W., Zhao, Z., He, J., Zhang, M.: Dualsign: semi-supervised sign language production with balanced multi-modal multi-task dual transformation. In: MM, pp. 5486\u20135495 (2022)","DOI":"10.1145\/3503161.3547957"},{"key":"3_CR28","unstructured":"Hwang, E., Kim, J.H., Park, J.C.: Non-autoregressive sign language production with gaussian space. In: BMVC (2021)"},{"key":"3_CR29","doi-asserted-by":"crossref","unstructured":"Jiang, S., Sun, B., Wang, L., Bai, Y., Li, K., Fu, Y.: Skeleton aware multi-modal sign language recognition. In: CVPRW, pp. 3413\u20133423 (2021)","DOI":"10.1109\/CVPRW53098.2021.00380"},{"key":"3_CR30","doi-asserted-by":"crossref","unstructured":"Jin, S., et al.: Whole-body human pose estimation in the wild. In: ECCV, pp. 
196\u2013214 (2020)","DOI":"10.1007\/978-3-030-58545-7_12"},{"key":"3_CR31","doi-asserted-by":"crossref","unstructured":"Joo, H., Simon, T., Sheikh, Y.: Total capture: a 3D deformation model for tracking faces, hands, and bodies. In: CVPR, pp. 8320\u20138329 (2018)","DOI":"10.1109\/CVPR.2018.00868"},{"key":"3_CR32","unstructured":"Joze, H.R.V., Koller, O.: MS-ASL: A large-scale data set and benchmark for understanding American sign language. In: BMVC (2019)"},{"key":"3_CR33","doi-asserted-by":"crossref","unstructured":"Kanazawa, A., Black, M.J., Jacobs, D.W., Malik, J.: End-to-end recovery of human shape and pose. In: CVPR, pp. 7122\u20137131 (2018)","DOI":"10.1109\/CVPR.2018.00744"},{"key":"3_CR34","unstructured":"Kingma, D.P., Ba, J.: Adam: a method for stochastic optimization. In: ICLR (2015)"},{"key":"3_CR35","series-title":"Lecture Notes in Computer Science (Lecture Notes in Artificial Intelligence)","doi-asserted-by":"publisher","first-page":"113","DOI":"10.1007\/978-3-642-23974-8_13","volume-title":"Intelligent Virtual Agents","author":"M Kipp","year":"2011","unstructured":"Kipp, M., Heloir, A., Nguyen, Q.: Sign language avatars: animation and comprehensibility. In: Vilhj\u00e1lmsson, H.H., Kopp, S., Marsella, S., Th\u00f3risson, K.R. (eds.) IVA 2011. LNCS (LNAI), vol. 6895, pp. 113\u2013126. Springer, Heidelberg (2011). https:\/\/doi.org\/10.1007\/978-3-642-23974-8_13"},{"key":"3_CR36","doi-asserted-by":"crossref","unstructured":"Lassner, C., Romero, J., Kiefel, M., Bogo, F., Black, M.J., Gehler, P.V.: Unite the people: closing the loop between 3D and 2D human representations. In: CVPR, pp. 6050\u20136059 (2017)","DOI":"10.1109\/CVPR.2017.500"},{"key":"3_CR37","doi-asserted-by":"crossref","unstructured":"Lee, T., Oh, Y., Lee, K.M.: Human part-wise 3D motion context learning for sign language recognition. In: ICCV, pp. 
20740\u201320750 (2023)","DOI":"10.1109\/ICCV51070.2023.01896"},{"key":"3_CR38","doi-asserted-by":"crossref","unstructured":"Li, D., Rodriguez, C., Yu, X., Li, H.: Word-level deep sign language recognition from video: a new large-scale dataset and methods comparison. In: WACV, pp. 1459\u20131469 (2020)","DOI":"10.1109\/WACV45572.2020.9093512"},{"key":"3_CR39","unstructured":"Li, D., et al.: Tspnet: hierarchical feature learning via temporal semantic pyramid for sign language translation. In: NeurIPS, vol.\u00a033, pp. 12034\u201312045 (2020)"},{"key":"3_CR40","doi-asserted-by":"crossref","unstructured":"Li, D., Yu, X., Xu, C., Petersson, L., Li, H.: Transferring cross-domain knowledge for video sign language recognition. In: CVPR, pp. 6205\u20136214 (2020)","DOI":"10.1109\/CVPR42600.2020.00624"},{"key":"3_CR41","doi-asserted-by":"crossref","unstructured":"Lin, J., Zeng, A., Wang, H., Zhang, L., Li, Y.: One-stage 3D whole-body mesh recovery with component aware transformer. In: CVPR, pp. 21159\u201321168 (2023)","DOI":"10.1109\/CVPR52729.2023.02027"},{"key":"3_CR42","doi-asserted-by":"publisher","first-page":"726","DOI":"10.1162\/tacl_a_00343","volume":"8","author":"Y Liu","year":"2020","unstructured":"Liu, Y., et al.: Multilingual denoising pre-training for neural machine translation. TACL 8, 726\u2013742 (2020)","journal-title":"TACL"},{"key":"3_CR43","doi-asserted-by":"publisher","first-page":"551","DOI":"10.1007\/s10209-015-0407-2","volume":"15","author":"J McDonald","year":"2016","unstructured":"McDonald, J., et al.: An automated technique for real-time production of lifelike animations of American sign language. Univ. Access Inf. Soc. 15, 551\u2013566 (2016)","journal-title":"Univ. Access Inf. Soc."},{"key":"3_CR44","doi-asserted-by":"crossref","unstructured":"Min, Y., Hao, A., Chai, X., Chen, X.: Visual alignment constraint for continuous sign language recognition. In: ICCV, pp. 
11542\u201311551 (2021)","DOI":"10.1109\/ICCV48922.2021.01134"},{"key":"3_CR45","doi-asserted-by":"crossref","unstructured":"Momeni, L., Bull, H., Prajwal, K., Albanie, S., Varol, G., Zisserman, A.: Automatic dense annotation of large-vocabulary sign language videos. In: ECCV, pp. 671\u2013690 (2022)","DOI":"10.1007\/978-3-031-19833-5_39"},{"key":"3_CR46","doi-asserted-by":"crossref","unstructured":"Momeni, L., Varol, G., Albanie, S., Afouras, T., Zisserman, A.: Watch, read and lookup: learning to spot signs from multiple supervisors. In: ACCV (2020)","DOI":"10.1007\/978-3-030-69544-6_18"},{"key":"3_CR47","unstructured":"Niu, Z., Zuo, R., Mak, B., Wei, F.: A Hong Kong sign language corpus collected from sign-interpreted TV news. In: LREC-COLING, pp. 636\u2013646 (2024)"},{"key":"3_CR48","unstructured":"Nocedal, J., Wright, S.J.: Nonlinear equations. Numer. Optim. 270\u2013302 (2006)"},{"key":"3_CR49","doi-asserted-by":"crossref","unstructured":"Pavlakos, G., et al.: Expressive body capture: 3D hands, face, and body from a single image. In: CVPR, pp. 10975\u201310985 (2019)","DOI":"10.1109\/CVPR.2019.01123"},{"key":"3_CR50","doi-asserted-by":"crossref","unstructured":"Rastgoo, R., Kiani, K., Escalera, S., Sabokrou, M.: Sign language production: a review. In: CVPRW, pp. 3451\u20133461 (2021)","DOI":"10.1109\/CVPRW53098.2021.00384"},{"key":"3_CR51","unstructured":"Razavi, A., Van\u00a0den Oord, A., Vinyals, O.: Generating diverse high-fidelity images with VQ-VAE-2. In: NeurIPS, vol. 32 (2019)"},{"key":"3_CR52","doi-asserted-by":"crossref","unstructured":"Sak, H., et al.: Learning acoustic frame labeling for speech recognition with recurrent neural networks. In: ICASSP, pp. 4280\u20134284 (2015)","DOI":"10.1109\/ICASSP.2015.7178778"},{"key":"3_CR53","unstructured":"Saunders, B., Camg\u00f6z, N.C., Bowden, R.: Adversarial training for multi-channel sign language production. 
In: BMVC (2020)"},{"key":"3_CR54","doi-asserted-by":"crossref","unstructured":"Saunders, B., Camgoz, N.C., Bowden, R.: Progressive transformers for end-to-end sign language production. In: ECCV, pp. 687\u2013705 (2020)","DOI":"10.1007\/978-3-030-58621-8_40"},{"key":"3_CR55","doi-asserted-by":"crossref","unstructured":"Saunders, B., Camgoz, N.C., Bowden, R.: Anonysign: novel human appearance synthesis for sign language video anonymisation. In: FG 2021, pp.\u00a01\u20138 (2021)","DOI":"10.1109\/FG52635.2021.9666984"},{"issue":"7","key":"3_CR56","doi-asserted-by":"publisher","first-page":"2113","DOI":"10.1007\/s11263-021-01457-9","volume":"129","author":"B Saunders","year":"2021","unstructured":"Saunders, B., Camgoz, N.C., Bowden, R.: Continuous 3D multi-channel sign language production via progressive transformers and mixture density networks. IJCV 129(7), 2113\u20132135 (2021)","journal-title":"IJCV"},{"key":"3_CR57","doi-asserted-by":"crossref","unstructured":"Saunders, B., Camgoz, N.C., Bowden, R.: Mixed signals: sign language production via a mixture of motion primitives. In: ICCV, pp. 1919\u20131929 (2021)","DOI":"10.1109\/ICCV48922.2021.00193"},{"key":"3_CR58","doi-asserted-by":"crossref","unstructured":"Saunders, B., Camgoz, N.C., Bowden, R.: Signing at scale: learning to co-articulate signs for large-scale photo-realistic sign language production. In: CVPR, pp. 5141\u20135151 (2022)","DOI":"10.1109\/CVPR52688.2022.00508"},{"key":"3_CR59","doi-asserted-by":"crossref","unstructured":"Shmelkov, K., Schmid, C., Alahari, K.: How good is my GAN? In: ECCV, pp. 213\u2013229 (2018)","DOI":"10.1007\/978-3-030-01216-8_14"},{"key":"3_CR60","unstructured":"Stoll, S., Camg\u00f6z, N.C., Hadfield, S., Bowden, R.: Sign language production using neural machine translation and generative adversarial networks. 
In: BMVC (2018)"},{"issue":"4","key":"3_CR61","doi-asserted-by":"publisher","first-page":"891","DOI":"10.1007\/s11263-019-01281-2","volume":"128","author":"S Stoll","year":"2020","unstructured":"Stoll, S., Camgoz, N.C., Hadfield, S., Bowden, R.: Text2sign: towards sign language production using neural machine translation and generative adversarial networks. IJCV 128(4), 891\u2013908 (2020)","journal-title":"IJCV"},{"key":"3_CR62","doi-asserted-by":"crossref","unstructured":"Tang, S., Hong, R., Guo, D., Wang, M.: Gloss semantic-enhanced network with online back-translation for sign language production. In: MM, pp. 5630\u20135638 (2022)","DOI":"10.1145\/3503161.3547830"},{"key":"3_CR63","doi-asserted-by":"crossref","unstructured":"Tarr\u00e9s, L., G\u00e1llego, G.I., Duarte, A., Torres, J., Gir\u00f3-i Nieto, X.: Sign language translation from instructional videos. In: CVPRW, pp. 5624\u20135634 (2023)","DOI":"10.52591\/lxai2023061814"},{"key":"3_CR64","doi-asserted-by":"crossref","unstructured":"Varol, G., Momeni, L., Albanie, S., Afouras, T., Zisserman, A.: Read and attend: temporal localisation in sign language videos. In: CVPR, pp. 16857\u201316866 (2021)","DOI":"10.1109\/CVPR46437.2021.01658"},{"issue":"10","key":"3_CR65","doi-asserted-by":"publisher","first-page":"3349","DOI":"10.1109\/TPAMI.2020.2983686","volume":"43","author":"J Wang","year":"2020","unstructured":"Wang, J., et al.: Deep high-resolution representation learning for visual recognition. IEEE TPAMI 43(10), 3349\u20133364 (2020)","journal-title":"IEEE TPAMI"},{"key":"3_CR66","doi-asserted-by":"crossref","unstructured":"Wei, F., Chen, Y.: Improving continuous sign language recognition with cross-lingual signs. In: ICCV, pp. 23612\u201323621 (2023)","DOI":"10.1109\/ICCV51070.2023.02158"},{"key":"3_CR67","doi-asserted-by":"crossref","unstructured":"Xiang, D., Joo, H., Sheikh, Y.: Monocular total capture: posing face, body, and hands in the wild. In: CVPR, pp. 
10965\u201310974 (2019)","DOI":"10.1109\/CVPR.2019.01122"},{"key":"3_CR68","unstructured":"Xie, P., Zhang, Q., Li, Z., Tang, H., Du, Y., Hu, X.: Vector quantized diffusion model with codeunet for text-to-sign pose sequences generation (2023)"},{"key":"3_CR69","doi-asserted-by":"crossref","unstructured":"Xu, H., Bazavan, E.G., Zanfir, A., Freeman, W.T., Sukthankar, R., Sminchisescu, C.: GHUM & GHUML: generative 3D human shape and articulated pose models. In: CVPR, pp. 6184\u20136193 (2020)","DOI":"10.1109\/CVPR42600.2020.00622"},{"key":"3_CR70","doi-asserted-by":"crossref","unstructured":"Xu, Y., Zhu, S.C., Tung, T.: Denserac: joint 3D pose and shape estimation by dense render-and-compare. In: ICCV, pp. 7760\u20137770 (2019)","DOI":"10.1109\/ICCV.2019.00785"},{"key":"3_CR71","doi-asserted-by":"crossref","unstructured":"Yao, H., Zhou, W., Feng, H., Hu, H., Zhou, H., Li, H.: Sign language translation with iterative prototype. In: ICCV, pp. 15592\u201315601 (2023)","DOI":"10.1109\/ICCV51070.2023.01429"},{"key":"3_CR72","doi-asserted-by":"crossref","unstructured":"Yin, A., et al.: Simulslt: end-to-end simultaneous sign language translation. In: MM, pp. 4118\u20134127 (2021)","DOI":"10.1145\/3474085.3475544"},{"key":"3_CR73","doi-asserted-by":"crossref","unstructured":"Yin, K., Read, J.: Better sign language translation with STMC-transformer. COLING (2020)","DOI":"10.18653\/v1\/2020.coling-main.525"},{"key":"3_CR74","doi-asserted-by":"crossref","unstructured":"Yu, P., Zhang, L., Fu, B., Chen, Y.: Efficient sign language translation with a curriculum-based non-autoregressive decoder. In: IJCAI, pp. 5260\u20135268 (2023)","DOI":"10.24963\/ijcai.2023\/584"},{"key":"3_CR75","doi-asserted-by":"crossref","unstructured":"Zelinka, J., Kanis, J.: Neural sign language synthesis: words are our glosses. In: WACV, pp. 
3395\u20133403 (2020)","DOI":"10.1109\/WACV45572.2020.9093516"},{"key":"3_CR76","doi-asserted-by":"crossref","unstructured":"Zhang, L., Rao, A., Agrawala, M.: Adding conditional control to text-to-image diffusion models. In: ICCV, pp. 3836\u20133847 (2023)","DOI":"10.1109\/ICCV51070.2023.00355"},{"key":"3_CR77","doi-asserted-by":"crossref","unstructured":"Zhao, W., Hu, H., Zhou, W., Shi, J., Li, H.: BEST: BERT pre-training for sign language recognition with coupling tokenization. In: AAAI (2023)","DOI":"10.1609\/aaai.v37i3.25470"},{"key":"3_CR78","doi-asserted-by":"crossref","unstructured":"Zheng, J., et al.: CVT-SLR: contrastive visual-textual transformation for sign language recognition with variational alignment. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.02216"},{"key":"3_CR79","doi-asserted-by":"crossref","unstructured":"Zhou, B., et al.: Gloss-free sign language translation: Improving from visual-language pretraining. In: ICCV, pp. 20871\u201320881 (2023)","DOI":"10.1109\/ICCV51070.2023.01908"},{"key":"3_CR80","doi-asserted-by":"crossref","unstructured":"Zhou, H., Zhou, W., Qi, W., Pu, J., Li, H.: Improving sign language translation with monolingual data by sign back-translation. In: CVPR, pp. 1316\u20131325 (2021)","DOI":"10.1109\/CVPR46437.2021.00137"},{"key":"3_CR81","doi-asserted-by":"crossref","unstructured":"Zhou, H., Zhou, W., Zhou, Y., Li, H.: Spatial-temporal multi-cue network for continuous sign language recognition. In: AAAI, pp. 13009\u201313016 (2020)","DOI":"10.1609\/aaai.v34i07.7001"},{"key":"3_CR82","doi-asserted-by":"crossref","unstructured":"Zuo, R., Mak, B.: C2SLR: consistency-enhanced continuous sign language recognition. In: CVPR, pp. 
5131\u20135140 (2022)","DOI":"10.1109\/CVPR52688.2022.00507"},{"issue":"6","key":"3_CR83","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3640815","volume":"20","author":"R Zuo","year":"2024","unstructured":"Zuo, R., Mak, B.: Improving continuous sign language recognition with consistency constraints and signer removal. ACM TOMM 20(6), 1\u201325 (2024)","journal-title":"ACM TOMM"},{"key":"3_CR84","doi-asserted-by":"crossref","unstructured":"Zuo, R., Wei, F., Mak, B.: Natural language-assisted sign language recognition. In: CVPR (2023)","DOI":"10.1109\/CVPR52729.2023.01430"},{"key":"3_CR85","doi-asserted-by":"crossref","unstructured":"Zuo, R., Wei, F., Mak, B.: Towards online sign language recognition and translation. arXiv preprint arXiv:2401.05336 (2024)","DOI":"10.18653\/v1\/2024.emnlp-main.619"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72967-6_3","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,30]],"date-time":"2024-11-30T18:17:22Z","timestamp":1732990642000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72967-6_3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,3]]},"ISBN":["9783031729669","9783031729676"],"references-count":85,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72967-6_3","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,3]]},"assertion":[{"value":"3 November 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference 
Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Milan","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Italy","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"29 September 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"4 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"18","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2024.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}