{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,5]],"date-time":"2026-02-05T22:25:50Z","timestamp":1770330350042,"version":"3.49.0"},"reference-count":62,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,12,22]],"date-time":"2025-12-22T00:00:00Z","timestamp":1766361600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,12,29]],"date-time":"2025-12-29T00:00:00Z","timestamp":1766966400000},"content-version":"vor","delay-in-days":7,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62073061"],"award-info":[{"award-number":["62073061"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100021171","name":"Guangdong Basic and Applied Basic Research Foundation","doi-asserted-by":"crossref","award":["2025A1515011602"],"award-info":[{"award-number":["2025A1515011602"]}],"id":[{"id":"10.13039\/501100021171","id-type":"DOI","asserted-by":"crossref"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J. King Saud Univ. Comput. Inf. 
Sci."],"published-print":{"date-parts":[[2026,1]]},"DOI":"10.1007\/s44443-025-00418-3","type":"journal-article","created":{"date-parts":[[2025,12,22]],"date-time":"2025-12-22T09:24:25Z","timestamp":1766395465000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["CCC: cross-modal contrastive creator for end-to-end sign language generation"],"prefix":"10.1007","volume":"38","author":[{"given":"Wang","family":"Yi","sequence":"first","affiliation":[]},{"given":"Ying","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4808-2175","authenticated-orcid":false,"given":"Lu","family":"Meng","sequence":"additional","affiliation":[]},{"given":"Chengchen","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Xuejie","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Shuoqian","family":"Gao","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,12,22]]},"reference":[{"key":"418_CR1","doi-asserted-by":"publisher","unstructured":"Ajagbe S A, Oladosu J\u00a0B, Olayiwola A\u00a0A, Falohun A\u00a0S (2024) Design and Development of Automatic Speech Recognition (ASR) System for Low-resource Language Using Convolutional Neural Network Model. The J Comput Sci Appl, 31(2):10\u201318 https:\/\/doi.org\/10.4314\/jcsia.v31i2.12","DOI":"10.4314\/jcsia.v31i2.12"},{"key":"418_CR2","doi-asserted-by":"publisher","unstructured":"Akinlade O, Vakaj E, Dridi A, Tiwari S, Ortiz-Rodr\u00edguez F (2023) Semantic Segmentation of the Lung to Examine the Effect of COVID-19 Using UNET Model. In: Applied Machine Learning and Data Analytics \u2013 5th International Conference, AMLDA 2022, Reynosa, Tamaulipas, Mexico, December 22\u201323, 2022, Revised Selected Papers. Jabbar M A, Ortiz-Rodrguez F, Tiwari S, Siarry P, editors, volume 1818 of Communications in Computer and Information Science, pp 52\u201363. 
Springer, Cham, https:\/\/doi.org\/10.1007\/978-3-031-34222-6_5","DOI":"10.1007\/978-3-031-34222-6_5"},{"key":"418_CR3","doi-asserted-by":"crossref","unstructured":"Arkushin R S, Moryossef A, Fried O (2023) Ham2Pose: Animating sign language notation into pose sequences, In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Vancouver, Canada, pp 21046\u201321056. IEEE","DOI":"10.1109\/CVPR52729.2023.02016"},{"key":"418_CR4","doi-asserted-by":"crossref","unstructured":"Baltatzis V, Potamias R\u00a0A, Ververas E, Sun G, Deng J, Zafeiriou S (2024) Neural Sign Actors: A Diffusion Model for 3D Sign Language Production from Text, In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp\u00a01985\u20131995","DOI":"10.1109\/CVPR52733.2024.00194"},{"key":"418_CR5","doi-asserted-by":"crossref","unstructured":"Baltatzis V, Potamias R A, Ververas E, Sun G, Deng J, Zafeiriou S (2024) Neural Sign Actors: A diffusion model for 3D sign language production from text, In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Seattle, WA, USA, pp 1985\u20131995. 
IEEE","DOI":"10.1109\/CVPR52733.2024.00194"},{"key":"418_CR6","doi-asserted-by":"crossref","unstructured":"Camgoz N C, Hadfield S, Koller O, Ney H, Bowden R (2018) Neural sign language translation, In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Salt Lake City, Utah, USA, IEEE Comput Soc, pp 7784\u20137793","DOI":"10.1109\/CVPR.2018.00812"},{"key":"418_CR7","doi-asserted-by":"crossref","unstructured":"Chakladar D\u00a0D, Kumar P, Mandal S, Roy P\u00a0P, Iwamura M, Kim B-G (2021) 3D Avatar Approach for Continuous Sign Movement Using Speech\/Text, Appl Sci, vol\u00a011, no\u00a08, p\u00a03439,","DOI":"10.3390\/app11083439"},{"key":"418_CR8","doi-asserted-by":"crossref","unstructured":"Cheng Y, Wei F, Bao J, Chen D, Zhang W (2023) CICO: Domain-aware sign language retrieval via cross-lingual contrastive learning, In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Vancouver, Canada, pp 19016\u201319026. IEEE","DOI":"10.1109\/CVPR52729.2023.01823"},{"key":"418_CR9","doi-asserted-by":"crossref","unstructured":"Chen S, Wang Q, Wang Q (2024) Semantic-driven Diffusion for Sign Language Production with Gloss-Pose Latent Spaces Alignment. Comput Vis Image Underst 246:104050","DOI":"10.1016\/j.cviu.2024.104050"},{"key":"418_CR10","doi-asserted-by":"crossref","unstructured":"Devlin J, Chang M-W, Lee K, Toutanova K (2019) BERT: Pre-training of deep bidirectional transformers for language understanding, In: Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT), pp 4171\u20134186","DOI":"10.18653\/v1\/N19-1423"},{"key":"418_CR11","doi-asserted-by":"publisher","unstructured":"Dong L, Wang X, Nwogu I (2024) Word-Conditioned 3D American Sign Language Motion Generation. In: Findings of the Association for Computational Linguistics: EMNLP 2024, pp 9993\u20139999, Miami, Florida, USA. 
Association for Computational Linguistics, https:\/\/doi.org\/10.18653\/v1\/2024.findings-emnlp.584","DOI":"10.18653\/v1\/2024.findings-emnlp.584"},{"key":"418_CR12","unstructured":"Fang S, Chen C, Wang L, Zheng C, Sui C, Tian Y (2024) SignLLM: Sign Language Production Large Language Models, arXiv:2405.10718"},{"key":"418_CR13","doi-asserted-by":"crossref","unstructured":"Glauert J RW, Elliott R, Cox S J, Tryggvason J, Sheard M (2006) Vanessa-a System For Communication Between Deaf And Hearing People, Technol Disab 18(4):207\u2013216","DOI":"10.3233\/TAD-2006-18408"},{"key":"418_CR14","unstructured":"Glorot X, Bengio Y (2010) Understanding the difficulty of training deep feedforward neural networks, In: Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics (AISTATS), pp 249\u2013256. JMLR W&CP"},{"key":"418_CR15","unstructured":"Goodfellow I J, Pouget-Abadie J, Mirza M, Xu B, Warde-Farley D, Ozair S, Courville A, Bengio Y (2014) Generative adversarial nets, In: Advances in Neural Information Processing Systems (NeurIPS), vol 27,"},{"key":"418_CR16","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2015) Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification, In: Proceedings of the IEEE International Conference on Computer Vision (ICCV), pp 1026\u20131034","DOI":"10.1109\/ICCV.2015.123"},{"key":"418_CR17","doi-asserted-by":"crossref","unstructured":"Huynh T, Kornblith S, Walter M R, Maire M, Khademi M (2022) Boosting contrastive self-supervised learning with false negative cancellation, In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), pp 2785\u20132795","DOI":"10.1109\/WACV51458.2022.00106"},{"key":"418_CR18","doi-asserted-by":"crossref","unstructured":"Hwang E J, Kim J-H, Park J C (2021) Non-Autoregressive Sign Language Production with Gaussian Space, In: British Machine Vision Conference (BMVC), paper 
197","DOI":"10.5244\/C.35.310"},{"key":"418_CR19","unstructured":"Hwang E\u00a0J, Lee H, Park J\u00a0C (2023) Autoregressive Sign Language Production: A Gloss-Free Approach with Discrete Representations, arXiv:2309.12179"},{"key":"418_CR20","unstructured":"Jiang T, Lu P, Zhang L, Ma N, Han R, Lyu C, Li Y, Chen K (2023) RTMPose: Real-time multi-person pose estimation based on MMPose, arXiv:2303.07399"},{"key":"418_CR21","unstructured":"Kim J-H, Hwang E J, Cho S, Lee D H, Park J C (2022) Sign language production with avatar layering: A critical use case over rare words, In: Proceedings of the Thirteenth Language Resources and Evaluation Conference (LREC), Marseille, France, pp 1519\u20131528. European Language Resources Association (ELRA)"},{"key":"418_CR22","doi-asserted-by":"crossref","unstructured":"Kim Y (2014) Convolutional neural networks for sentence classification, arXiv:1408.5882","DOI":"10.3115\/v1\/D14-1181"},{"key":"418_CR23","unstructured":"Kim M, Shim S-W, Lee B-J (2025) FALCON: False-Negative Aware Learning of Contrastive Negatives in Vision-Language Pretraining, arXiv:2505.11192"},{"key":"418_CR24","unstructured":"Kingma D P, Welling M (2013) Auto-encoding variational Bayes, arXiv:1312.6114"},{"issue":"5","key":"418_CR25","doi-asserted-by":"publisher","first-page":"467","DOI":"10.1080\/01449290903420192","volume":"29","author":"D Kouremenos","year":"2010","unstructured":"Kouremenos D, Fotinea S-E, Efthimiou E, Ntalianis K (2010) A prototype Greek text to Greek Sign Language conversion system. 
Behav Inf Technol 29(5):467\u2013481","journal-title":"Behav Inf Technol"},{"key":"418_CR26","doi-asserted-by":"crossref","unstructured":"Liang S, Li Y, Xin W, Chen H, Liu X, Liu K, Miao Q (2025) Generative Sign-description Prompts with Multi-positive Contrastive Learning for Sign Language Recognition, arXiv:2505.02304","DOI":"10.3390\/s25195957"},{"issue":"1","key":"418_CR27","first-page":"1","volume":"8","author":"H Lutzenberger","year":"2023","unstructured":"Lutzenberger H, Mudd K, Stamp R, Schembri A (2023) The social structure of signing communities and lexical variation: A cross-linguistic comparison of three unrelated sign languages. Glossa 8(1):1\u201340","journal-title":"Glossa"},{"key":"418_CR28","doi-asserted-by":"crossref","unstructured":"Ma J, Wang W, Yang Y, Zheng F (2024) MS2SL: Multimodal Spoken Data-Driven Continuous Sign Language Production, In: Findings of the Association for Computational Linguistics: ACL 2024, Bangkok, Thailand, pp\u00a07241\u20137254. Available: https:\/\/aclanthology.org\/2024.findings-acl.432\/","DOI":"10.18653\/v1\/2024.findings-acl.432"},{"key":"418_CR29","doi-asserted-by":"crossref","unstructured":"Moon J, Park J, Kim J, Bae J, Jeon H, Kim H Y (2025) DiffSLT: Enhancing Diversity in Sign Language Translation via Diffusion Model, Pattern Recognition Letters,","DOI":"10.2139\/ssrn.5144736"},{"key":"418_CR30","doi-asserted-by":"crossref","unstructured":"Pennington J, Socher R, Manning C D (2014) GloVe: Global vectors for word representation, In: Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp 1532\u20131543","DOI":"10.3115\/v1\/D14-1162"},{"key":"418_CR31","doi-asserted-by":"crossref","unstructured":"Pratikaki C, Fotinea S-E, Efthimiou E, Filntisis P\u00a0P, Roussos A, Maragos P (2025) Text-to-Sign Language Production via Intermediate Skeletal Representations Using Transformers and Neural Rendering, In: Adjunct Proceedings of the 25th ACM International Conference on 
Intelligent Virtual Agents (IVA\u00a0\u201925), pp\u00a033:1\u201333:8","DOI":"10.1145\/3742886.3756732"},{"key":"418_CR32","doi-asserted-by":"crossref","unstructured":"Qi F, Duan Y, Zhang H, Xu C (2024) SignGen: End-to-end sign language video generation with latent diffusion, In: European Conference on Computer Vision (ECCV), Milan, Italy, pp 252\u2013270. Springer, Cham","DOI":"10.1007\/978-3-031-73668-1_15"},{"key":"418_CR33","unstructured":"Radford A, Kim J W, Hallacy C, Ramesh A, Goh G, Agarwal S, Sastry G, Askell A, Mishkin P, Clark J, et al (2021) Learning transferable visual models from natural language supervision, In: Proceedings of the 38th International Conference on Machine Learning (ICML), pp 8748\u20138763. PMLR"},{"key":"418_CR34","unstructured":"Radford A, Kim J W, Xu T, Brockman G, McLeavey C, Sutskever I (2022) Robust Speech Recognition via Large-Scale Weak Supervision, arXiv:2212.04356"},{"key":"418_CR35","doi-asserted-by":"crossref","unstructured":"Rastgoo R, Kiani K, Escalera S, Sabokrou M (2021) Sign Language Production: A Review. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, pp 3451\u20133461, June 2021","DOI":"10.1109\/CVPRW53098.2021.00384"},{"issue":"7","key":"418_CR36","doi-asserted-by":"publisher","first-page":"2113","DOI":"10.1007\/s11263-021-01457-9","volume":"129","author":"B Saunders","year":"2021","unstructured":"Saunders B (2021) Necati Cihan Camgoz, and Richard Bowden, Continuous 3D multi-channel sign language production via progressive transformers and mixture density networks. 
Int J Comput Vis 129(7):2113\u20132135","journal-title":"Int J Comput Vis"},{"key":"418_CR37","doi-asserted-by":"crossref","unstructured":"Saunders B, Camgoz N C, Bowden R (2020) Adversarial training for multi-channel sign language production, arXiv:2008.12405","DOI":"10.5244\/C.34.63"},{"key":"418_CR38","doi-asserted-by":"crossref","unstructured":"Saunders B, Camgoz N\u00a0C, Bowden R (2020) Progressive Transformers for End-to-End Sign Language Production, In: Computer Vision \u2013 ECCV 2020, ser. Lecture Notes in Computer Science, vol\u00a012356. Springer, pp\u00a0687\u2013705","DOI":"10.1007\/978-3-030-58621-8_40"},{"key":"418_CR39","doi-asserted-by":"crossref","unstructured":"Saunders B, Camgoz N C, Bowden R (2020) Progressive transformers for end-to-end sign language production, In: European Conference on Computer Vision (ECCV), Glasgow, UK, pp 687\u2013705. Springer, Cham","DOI":"10.1007\/978-3-030-58621-8_40"},{"key":"418_CR40","doi-asserted-by":"crossref","unstructured":"Saunders B, Camgoz N\u00a0C, Bowden R (2021) Mixed SIGNals: Sign Language Production via a Mixture of Motion Primitives, In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), Montreal, Canada, pp\u00a01919\u20131929. Available: https:\/\/openaccess.thecvf.com\/content\/ICCV2021\/papers\/Saunders_Mixed_SIGNals_Sign_Language_Production_via_a_Mixture_of_Motion_ICCV_2021_paper.pdf","DOI":"10.1109\/ICCV48922.2021.00193"},{"key":"418_CR41","doi-asserted-by":"crossref","unstructured":"Saunders B, Camgoz N C, Bowden R (2022) Signing at scale: Learning to co-articulate signs for large-scale photo-realistic sign language production, In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), New Orleans, LA, USA, pp 5141\u20135151. 
IEEE","DOI":"10.1109\/CVPR52688.2022.00508"},{"key":"418_CR42","doi-asserted-by":"crossref","unstructured":"Stamp R, Schembri A, Fenlon J, Rentelis R, Woll B, Cormier K (2014) Lexical variation and change in British Sign Language. PLoS ONE 9(4):e94053","DOI":"10.1371\/journal.pone.0094053"},{"issue":"4","key":"418_CR43","doi-asserted-by":"publisher","first-page":"891","DOI":"10.1007\/s11263-019-01281-2","volume":"128","author":"S Stoll","year":"2020","unstructured":"Stoll S (2020) Necati Cihan Camgoz, Simon Hadfield, and Richard Bowden, Text2Sign: towards sign language production using neural machine translation and generative adversarial networks. Int J Comput Vis 128(4):891\u2013908","journal-title":"Int J Comput Vis"},{"key":"418_CR44","unstructured":"Stoll S, Camg\u00f6z N C, Hadfield S, et al (2019) Sign language production using neural machine translation and generative adversarial networks[C]. Proceed British Mach Vis Conf (BMVC). 1\u201312"},{"key":"418_CR45","doi-asserted-by":"crossref","unstructured":"Stoll S, Hadfield S, Bowden R (2020) SignSynth: Data-Driven Sign Language Video Generation, In: Computer Vision \u2013 ECCV 2020 Workshops, Lecture Notes in Computer Science, vol\u00a012536, Springer, pp\u00a0353\u2013370","DOI":"10.1007\/978-3-030-66823-5_21"},{"key":"418_CR46","doi-asserted-by":"publisher","unstructured":"Tang S, He J, Guo D, Wei Y, Li F, Hong R (2025) Sign-IDD: Iconicity disentangled diffusion for sign language production[C]. Proceed AAAI Conf Artif Intell, 39(7):7266\u20137274. 
https:\/\/doi.org\/10.1609\/aaai.v39i7.32781.","DOI":"10.1609\/aaai.v39i7.32781"},{"key":"418_CR47","doi-asserted-by":"crossref","unstructured":"Tang S, Xue F, Wu J, Wang S, Hong R (2025) Gloss-driven conditional diffusion models for sign language production, ACM Trans Multimed Comput, Commun, Appl, vol\u00a021, no\u00a04, pp\u00a0105:1\u2013105:17","DOI":"10.1145\/3663572"},{"key":"418_CR48","doi-asserted-by":"crossref","unstructured":"Tsai Y-H H, Bai S, Liang P P, Kolter J Z, Morency L-P, Salakhutdinov R (2019) Multimodal Transformer for Unaligned Multimodal Language Sequences, In: Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (ACL), pp 6558\u20136569","DOI":"10.18653\/v1\/P19-1656"},{"key":"418_CR49","unstructured":"van den Oord A, Vinyals O, et al (2017) Neural discrete representation learning, In: Advances in Neural Information Processing Systems (NeurIPS), vol 30"},{"key":"418_CR50","unstructured":"Vaswani A, Shazeer N, Parmar N, Uszkoreit J (2017) Llion Jones, Aidan N. Gomez, \u0141ukasz Kaiser, and Illia Polosukhin, Attention Is All You Need, In: Advances in Neural Information Processing Systems (NeurIPS), vol 30"},{"key":"418_CR51","doi-asserted-by":"publisher","unstructured":"Walsh H, Ravanshad A, Rahmani M, Bowden R (2024) A data-driven representation for sign language production[C]. Proceed 18th Int Conf Automat Face Gesture Recogn (FG 2024). Istanbul, Turkey: IEEE, 1\u201310. https:\/\/doi.org\/10.1109\/FG59268.2024.10581937.","DOI":"10.1109\/FG59268.2024.10581937"},{"key":"418_CR52","doi-asserted-by":"publisher","unstructured":"Wang C, Deng Z, Jiang Z, Shen F, Yin Y, Gan S, Cheng Z, Ge S, Gu Q (2025) Advanced Sign Language Video Generation with Compressed and Quantized Multi-Condition Tokenization. 
arXiv:2506.15980, https:\/\/doi.org\/10.48550\/arXiv.2506.15980","DOI":"10.48550\/arXiv.2506.15980"},{"key":"418_CR53","doi-asserted-by":"crossref","unstructured":"Wang X, Tang S, Song P, et al (2025) Linguistics-Vision Monotonic Consistent Network for Sign Language Production[C]. Proceed IEEE Int Conf Acoust, Speech Signal Process (ICASSP). IEEE","DOI":"10.1109\/ICASSP49660.2025.10890594"},{"key":"418_CR54","doi-asserted-by":"crossref","unstructured":"Wang X, Tang S, Song P, Wang S, Guo D, Hong R (2025) Linguistics-vision monotonic consistent network for sign language production, In: ICASSP 2025 \u2013 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp 1\u20135. IEEE","DOI":"10.1109\/ICASSP49660.2025.10890594"},{"key":"418_CR55","doi-asserted-by":"crossref","unstructured":"Xie P, Zhang Q, Peng T, Tang H, Yao D, Li Z (2024) G2P-DDM: Generating sign pose sequence from gloss sequence with discrete diffusion model. Proceed AAAI Conf Artif Intell 38(6):6234\u20136242","DOI":"10.1609\/aaai.v38i6.28441"},{"key":"418_CR56","doi-asserted-by":"crossref","unstructured":"Yin A, Li H, Shen K, Tang S, Zhuang Y (2024) T2S-GPT: Dynamic vector quantization for autoregressive sign language production from text, arXiv:2406.07119","DOI":"10.18653\/v1\/2024.acl-long.183"},{"key":"418_CR57","doi-asserted-by":"crossref","unstructured":"Yin A, Li H, Shen K, Tang S, Zhuang Y (2024) T2S-GPT: Dynamic Vector Quantization for Autoregressive Sign Language Production from Text, In: Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (ACL 2024)","DOI":"10.18653\/v1\/2024.acl-long.183"},{"key":"418_CR58","doi-asserted-by":"crossref","unstructured":"Yu Z, Huang S, Cheng Y, Birdal T (2024) SignAvatars: A Large-Scale 3D Sign Language Holistic Motion Dataset and Benchmark, In: Computer Vision \u2013 ECCV 2024, Springer, 
pp\u00a01\u201318","DOI":"10.1007\/978-3-031-72652-1_1"},{"issue":"6","key":"418_CR59","doi-asserted-by":"publisher","first-page":"4749","DOI":"10.1109\/JIOT.2020.3028574","volume":"8","author":"J Zhang","year":"2021","unstructured":"Zhang J, Li Y, Xiao W (2021) Integrated multiple kernel learning for device-free localization in cluttered environments using spatio-temporal information. IEEE Internet Things J 8(6):4749\u20134761","journal-title":"IEEE Internet Things J"},{"key":"418_CR60","doi-asserted-by":"crossref","unstructured":"Zhang J, Xue J, Li Y, Cotton SL (2025) Leveraging online learning for domain-adaptation in Wi-Fi-based device-free localization. IEEE Trans Mob Comput 24(8):7773\u20137787","DOI":"10.1109\/TMC.2025.3552538"},{"key":"418_CR61","doi-asserted-by":"crossref","unstructured":"Zhang J, Zhang Y, Cun X, Zhang Y, Zhao H, Lu H, Shen X, Shan Y (2023) Generating human motion from textual descriptions with discrete representations, In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Vancouver, Canada, pp 14730\u201314740. IEEE","DOI":"10.1109\/CVPR52729.2023.01415"},{"key":"418_CR62","doi-asserted-by":"crossref","unstructured":"Zheng J, Wang Y, Tan C, Li S, Wang G, Xia J, Chen Y, Li S Z (2023) CVT-SLR: Contrastive visual-textual transformation for sign language recognition with variational alignment, In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), Vancouver, BC, Canada, pp 23141\u201323150. 
IEEE","DOI":"10.1109\/CVPR52729.2023.02216"}],"container-title":["Journal of King Saud University Computer and Information Sciences"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s44443-025-00418-3","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s44443-025-00418-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s44443-025-00418-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,5]],"date-time":"2026-02-05T09:51:45Z","timestamp":1770285105000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s44443-025-00418-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,12,22]]},"references-count":62,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,1]]}},"alternative-id":["418"],"URL":"https:\/\/doi.org\/10.1007\/s44443-025-00418-3","relation":{},"ISSN":["1319-1578","2213-1248"],"issn-type":[{"value":"1319-1578","type":"print"},{"value":"2213-1248","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,12,22]]},"assertion":[{"value":"11 October 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"4 December 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"22 December 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"We declare that we have no financial and personal relationships with other people or organizations that can inappropriately 
influence our work, and there is no professional or other personal interest of any nature or kind in any product, service, or company that could be construed as influencing the position presented in, or the review of, the manuscript entitled.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing Interest"}}],"article-number":"2"}}