{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,5]],"date-time":"2026-05-05T12:03:36Z","timestamp":1777982616699,"version":"3.51.4"},"reference-count":56,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T00:00:00Z","timestamp":1774051200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T00:00:00Z","timestamp":1774051200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["51405448"],"award-info":[{"award-number":["51405448"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int. J. Mach. Learn. &amp; Cyber."],"published-print":{"date-parts":[[2026,5]]},"DOI":"10.1007\/s13042-026-03002-x","type":"journal-article","created":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T06:11:50Z","timestamp":1774073510000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Dstadapter: divided spatial-temporal adapter fine-tuning method for sign language recognition"],"prefix":"10.1007","volume":"17","author":[{"given":"Qiuhong","family":"Tian","sequence":"first","affiliation":[]},{"given":"Yijie","family":"Yang","sequence":"additional","affiliation":[]},{"given":"Bin","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Jiacheng","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Junxiao","family":"Ning","sequence":"additional","affiliation":[]},{"given":"Lizao","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,3,21]]},"reference":[{"key":"3002_CR1","doi-asserted-by":"crossref","unstructured":"Santhalingam PS, Pathak P, Ko\u0161eck\u00e1 J, Rangwala H et al (2019) Sign language recognition analysis using multimodal data. In: 2019 IEEE International Conference on Data Science and Advanced Analytics (DSAA), pp. 203\u2013210 . IEEE","DOI":"10.1109\/DSAA.2019.00035"},{"key":"3002_CR2","doi-asserted-by":"crossref","unstructured":"Li D, Rodriguez C, Yu X, Li H (2020) Word-level deep sign language recognition from video: A new large-scale dataset and methods comparison. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 1459\u20131469","DOI":"10.1109\/WACV45572.2020.9093512"},{"key":"3002_CR3","first-page":"854","volume":"37","author":"L Hu","year":"2023","unstructured":"Hu L, Gao L, Liu Z, Feng W (2023) Self-emphasizing network for continuous sign language recognition. Proceed AAAI Conf Artif Intell 37:854\u2013862","journal-title":"Proceed AAAI Conf Artif Intell"},{"key":"3002_CR4","doi-asserted-by":"crossref","unstructured":"Liu Z, Qi X, Pang L (2018) Self-boosted gesture interactive system with st-net. In: Proceedings of the 26th ACM International Conference on Multimedia, pp. 145\u2013153","DOI":"10.1145\/3240508.3240530"},{"key":"3002_CR5","doi-asserted-by":"crossref","unstructured":"Hu L, Gao L, Liu Z, Feng W (2023) Continuous sign language recognition with correlation network. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 2529\u20132539","DOI":"10.1109\/CVPR52729.2023.00249"},{"key":"3002_CR6","doi-asserted-by":"crossref","unstructured":"Das S, Biswas SK, Purkayastha B (2024) Occlusion robust sign language recognition system for indian sign language using cnn and pose features. Multimedia Tools and Applications, 1\u201320","DOI":"10.21203\/rs.3.rs-2801772\/v1"},{"key":"3002_CR7","doi-asserted-by":"crossref","unstructured":"Amorim CC, Mac\u00eado D, Zanchettin C (2019) Spatial-temporal graph convolutional networks for sign language recognition. In: International Conference on Artificial Neural Networks, pp. 646\u2013657 . Springer","DOI":"10.1007\/978-3-030-30493-5_59"},{"key":"3002_CR8","doi-asserted-by":"crossref","unstructured":"Jiang S, Sun B, Wang L, Bai Y, Li K, Fu Y (2021) Skeleton aware multi-modal sign language recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 3413\u20133423","DOI":"10.1109\/CVPRW53098.2021.00380"},{"key":"3002_CR9","doi-asserted-by":"crossref","unstructured":"Camgoz NC, Hadfield S, Koller O, Ney H, Bowden R (2018) Neural sign language translation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7784\u20137793","DOI":"10.1109\/CVPR.2018.00812"},{"key":"3002_CR10","doi-asserted-by":"crossref","unstructured":"Camgoz NC, Koller O, Hadfield S, Bowden R (2020) Sign language transformers: Joint end-to-end sign language recognition and translation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10023\u201310033","DOI":"10.1109\/CVPR42600.2020.01004"},{"key":"3002_CR11","doi-asserted-by":"crossref","unstructured":"Selvaraj P, Nc G, Kumar P, Khapra M (2021) Openhands: Making sign language recognition accessible with pose-based pretrained models across languages. arXiv preprint arXiv:2110.05877","DOI":"10.18653\/v1\/2022.acl-long.150"},{"key":"3002_CR12","unstructured":"Yang T, Zhu Y, Xie Y, Zhang A, Chen C, Li M (2023) Aim: Adapting image models for efficient video action recognition. arXiv preprint arXiv:2302.03024"},{"key":"3002_CR13","unstructured":"Yu BX, Chang J, Liu L, Tian Q, Chen CW (2022) Towards a unified view on visual parameter-efficient transfer learning. arXiv preprint arXiv:2210.00788"},{"key":"3002_CR14","unstructured":"Radford A, Kim JW, Hallacy C, Ramesh A, Goh G, Agarwal S, Sastry G, Askell A, Mishkin P, Clark J et al (2021) Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763 . PMLR"},{"key":"3002_CR15","doi-asserted-by":"crossref","unstructured":"Sridhar A, Ganesan RG, Kumar P, Khapra M (2020) Include: A large scale dataset for indian sign language recognition. In: Proceedings of the 28th ACM International Conference on Multimedia, pp. 1366\u20131375","DOI":"10.1145\/3394171.3413528"},{"key":"3002_CR16","doi-asserted-by":"publisher","first-page":"181340","DOI":"10.1109\/ACCESS.2020.3028072","volume":"8","author":"OM Sincan","year":"2020","unstructured":"Sincan OM, Keles HY (2020) Autsl: a large scale multi-modal turkish sign language dataset and baseline methods. IEEE Access 8:181340\u2013181355","journal-title":"IEEE Access"},{"key":"3002_CR17","unstructured":"Kvanchiani K, Surovtsev P, Nagaev A, Petrova E, Kapitanov A (2024) Bukva: Russian sign language alphabet. arXiv preprint arXiv:2410.08675"},{"key":"3002_CR18","unstructured":"Ronchetti F, Quiroga FM, Estrebou C, Lanzarini L, Rosete A (2023) Lsa64: an argentinian sign language dataset. arXiv preprint arXiv:2310.17429"},{"key":"3002_CR19","doi-asserted-by":"publisher","first-page":"131","DOI":"10.1007\/s13042-017-0705-5","volume":"10","author":"MJ Cheok","year":"2019","unstructured":"Cheok MJ, Omar Z, Jaward MH (2019) A review of hand gesture and sign language recognition techniques. Int J Mach Learn Cybern 10:131\u2013153","journal-title":"Int J Mach Learn Cybern"},{"issue":"10","key":"3002_CR20","doi-asserted-by":"publisher","first-page":"2859","DOI":"10.1007\/s13042-021-01372-y","volume":"12","author":"RE Nogales","year":"2021","unstructured":"Nogales RE, Benalc\u00e1zar ME (2021) Hand gesture recognition using machine learning and infrared information: a systematic literature review. Int J Mach Learn Cybern 12(10):2859\u20132886","journal-title":"Int J Mach Learn Cybern"},{"key":"3002_CR21","doi-asserted-by":"crossref","unstructured":"Zuo R, Wei F, Mak B (2023) Natural language-assisted sign language recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14890\u201314900","DOI":"10.1109\/CVPR52729.2023.01430"},{"key":"3002_CR22","doi-asserted-by":"crossref","unstructured":"Rao GA, Syamala K, Kishore P, Sastry A (2018) Deep convolutional neural networks for sign language recognition. In: 2018 Conference on Signal Processing and Communication Engineering Systems (SPACES), pp. 194\u2013197 . IEEE","DOI":"10.1109\/SPACES.2018.8316344"},{"issue":"7","key":"3002_CR23","doi-asserted-by":"publisher","first-page":"10071","DOI":"10.1007\/s11042-022-12051-7","volume":"81","author":"X Han","year":"2022","unstructured":"Han X, Lu F, Tian G (2022) Efficient 3d cnns with knowledge transfer for sign language recognition. Multimed Tools App 81(7):10071\u201310090","journal-title":"Multimed Tools App"},{"key":"3002_CR24","first-page":"1","volume":"16","author":"Q Tian","year":"2024","unstructured":"Tian Q, Li S, Zhang Y, Lu H, Pan H (2024) Action recognition method based on a novel keyframe extraction method and enhanced 3d convolutional neural network. Int J Mach Learn Cybern 16:1\u201317","journal-title":"Int J Mach Learn Cybern"},{"key":"3002_CR25","doi-asserted-by":"publisher","unstructured":"Ahn J, Jang Y, Chung JS (2024) Slowfast network for continuous sign language recognition. In: ICASSP 2024\u20132024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3920\u20133924 . https:\/\/doi.org\/10.1109\/ICASSP48485.2024.10445841","DOI":"10.1109\/ICASSP48485.2024.10445841"},{"key":"3002_CR26","doi-asserted-by":"publisher","unstructured":"Renjith S, Manazhy R, Sumi Suresh MS (2024) Sign language recognition using bilstm model. In: 2024 IEEE 9th International Conference for Convergence in Technology (I2CT), pp. 1\u20135 . https:\/\/doi.org\/10.1109\/I2CT61223.2024.10543543","DOI":"10.1109\/I2CT61223.2024.10543543"},{"key":"3002_CR27","doi-asserted-by":"publisher","unstructured":"Parelli M, Papadimitriou K, Potamianos G, Pavlakos G, Maragos P (2022) Spatio-temporal graph convolutional networks for continuous sign language recognition. In: ICASSP 2022\u20132022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 8457\u20138461 . https:\/\/doi.org\/10.1109\/ICASSP43922.2022.9746971","DOI":"10.1109\/ICASSP43922.2022.9746971"},{"key":"3002_CR28","doi-asserted-by":"publisher","DOI":"10.3390\/s21041120","author":"L Meng","year":"2021","unstructured":"Meng L, Li R (2021) An attention-enhanced multi-scale and dual sign language recognition network based on a graph convolution network. Sensors. https:\/\/doi.org\/10.3390\/s21041120","journal-title":"Sensors"},{"key":"3002_CR29","doi-asserted-by":"crossref","unstructured":"Tian Q, Miao W, Zhang L, Yang Z, Yu Y, Zhao Y, Yao L (2024) Mcanet: a lightweight action recognition network with multidimensional convolution and attention. International Journal of Machine Learning and Cybernetics, 1\u201314","DOI":"10.21203\/rs.3.rs-4596829\/v1"},{"key":"3002_CR30","doi-asserted-by":"crossref","unstructured":"Pandey B, Sinha U, Nagwanshi KK (2025) A multi-stream framework using spatial-temporal collaboration learning networks for violence and non-violence classification in complex video environments. International Journal of Machine Learning and Cybernetics, 1\u201330","DOI":"10.1007\/s13042-025-02540-0"},{"key":"3002_CR31","doi-asserted-by":"publisher","DOI":"10.3390\/electronics13081509","author":"Y Kumar","year":"2024","unstructured":"Kumar Y, Huang K, Lin C-C, Watson A, Li JJ, Morreale P, Delgado J (2024) Applying swin architecture to diverse sign language datasets. Electronics. https:\/\/doi.org\/10.3390\/electronics13081509","journal-title":"Electronics"},{"key":"3002_CR32","first-page":"4","volume":"2","author":"G Bertasius","year":"2021","unstructured":"Bertasius G, Wang H, Torresani L (2021) Is space-time attention all you need for video understanding? In ICML 2:4","journal-title":"In ICML"},{"key":"3002_CR33","unstructured":"Houlsby N, Giurgiu A, Jastrzebski S, Morrone B, De Laroussilhe Q, Gesmundo A, Attariyan M, Gelly S (2019) Parameter-efficient transfer learning for nlp. In: International Conference on Machine Learning, pp. 2790\u20132799 . PMLR"},{"issue":"5","key":"3002_CR34","doi-asserted-by":"publisher","first-page":"1711","DOI":"10.1007\/s13042-023-01992-6","volume":"15","author":"H Bao","year":"2024","unstructured":"Bao H, Dong L, Wang W, Yang N, Piao S, Wei F (2024) Fine-tuning pretrained transformer encoders for sequence-to-sequence learning. Int J Mach Learn Cybern 15(5):1711\u20131728","journal-title":"Int J Mach Learn Cybern"},{"issue":"10","key":"3002_CR35","doi-asserted-by":"publisher","first-page":"4617","DOI":"10.1007\/s13042-024-02177-5","volume":"15","author":"LY Bayisa","year":"2024","unstructured":"Bayisa LY, Wang W, Wang Q, Ukwuoma CC, Gutema HK, Endris A, Abu T (2024) Unified deep learning model for multitask representation and transfer learning: image classification, object detection, and image captioning. Int J Mach Learn Cybern 15(10):4617\u20134637","journal-title":"Int J Mach Learn Cybern"},{"key":"3002_CR36","doi-asserted-by":"crossref","unstructured":"Lester B, Al-Rfou R, Constant N (2021) The power of scale for parameter-efficient prompt tuning. arXiv preprint arXiv:2104.08691","DOI":"10.18653\/v1\/2021.emnlp-main.243"},{"key":"3002_CR37","unstructured":"Li XL, Liang P (2021) Prefix-tuning: Optimizing continuous prompts for generation. arXiv preprint arXiv:2101.00190"},{"key":"3002_CR38","doi-asserted-by":"crossref","unstructured":"Jia M, Tang L, Chen B-C, Cardie C, Belongie S, Hariharan B, Lim S-N (2022) Visual prompt tuning. In: European Conference on Computer Vision, pp. 709\u2013727 . Springer","DOI":"10.1007\/978-3-031-19827-4_41"},{"key":"3002_CR39","doi-asserted-by":"crossref","unstructured":"Yin D, Yang Y, Wang Z, Yu H, Wei K, Sun X (2023) 1% vs 100%: Parameter-efficient low rank adapter for dense predictions. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 20116\u201320126","DOI":"10.1109\/CVPR52729.2023.01926"},{"key":"3002_CR40","doi-asserted-by":"crossref","unstructured":"Yin D, Hu L, Li B, Zhang Y, Yang X (2024) 5%$$>$$ 100%: Breaking performance shackles of full fine-tuning on visual recognition tasks. arXiv preprint arXiv:2408.08345","DOI":"10.1109\/CVPR52734.2025.01869"},{"key":"3002_CR41","unstructured":"Hu EJ, Shen Y, Wallis P, Allen-Zhu Z, Li Y, Wang S, Wang L, Chen W (2021) Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685"},{"key":"3002_CR42","doi-asserted-by":"crossref","unstructured":"Ding N, Qin Y, Yang G, Wei F, Yang Z, Su Y, Hu S, Chen Y, Chan C-M, Chen W et al (2022) Delta tuning: A comprehensive study of parameter efficient methods for pre-trained language models. arXiv preprint arXiv:2203.06904","DOI":"10.21203\/rs.3.rs-1553541\/v1"},{"key":"3002_CR43","first-page":"26462","volume":"35","author":"J Pan","year":"2022","unstructured":"Pan J, Lin Z, Zhu X, Shao J, Li H (2022) St-adapter: parameter-efficient image-to-video transfer learning. Adv Neural Inf Process Syst 35:26462\u201326477","journal-title":"Adv Neural Inf Process Syst"},{"issue":"2","key":"3002_CR44","doi-asserted-by":"publisher","first-page":"581","DOI":"10.1007\/s11263-023-01891-x","volume":"132","author":"P Gao","year":"2024","unstructured":"Gao P, Geng S, Zhang R, Ma T, Fang R, Zhang Y, Li H, Qiao Y (2024) Clip-adapter: Better vision-language models with feature adapters. Int J Comput Vision 132(2):581\u2013595","journal-title":"Int J Comput Vision"},{"key":"3002_CR45","doi-asserted-by":"crossref","unstructured":"Sung Y-L, Cho J, Bansal M (2022) Vl-adapter: Parameter-efficient transfer learning for vision-and-language tasks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 5227\u20135237","DOI":"10.1109\/CVPR52688.2022.00516"},{"key":"3002_CR46","doi-asserted-by":"crossref","unstructured":"Steitz J-MO, Roth S (2024) Adapters strike back. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 23449\u201323459","DOI":"10.1109\/CVPR52733.2024.02213"},{"key":"3002_CR47","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A, Weissenborn D, Zhai X, Unterthiner T, Dehghani M, Minderer M, Heigold G, Gelly S et al (2020) An image is worth 16x16 words. arXiv preprint arXiv:2010.11929 7"},{"key":"3002_CR48","doi-asserted-by":"crossref","unstructured":"Tran D, Wang H, Torresani L, Ray J, LeCun Y, Paluri M (2018) A closer look at spatiotemporal convolutions for action recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6450\u20136459","DOI":"10.1109\/CVPR.2018.00675"},{"key":"3002_CR49","doi-asserted-by":"crossref","unstructured":"Arnab A, Dehghani M, Heigold G, Sun C, Lu\u010di\u0107 M, Schmid C (2021) Vivit: A video vision transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6836\u20136846","DOI":"10.1109\/ICCV48922.2021.00676"},{"key":"3002_CR50","first-page":"1","volume":"16","author":"S Artham","year":"2024","unstructured":"Artham S, Shaikh SH (2024) A transformer-based convolutional local attention (convloa) method for temporal action localization. Int J Mach Learn Cybern 16:1\u201318","journal-title":"Int J Mach Learn Cybern"},{"key":"3002_CR51","doi-asserted-by":"crossref","unstructured":"Li X, Zhu Y, Wang L (2024) Zeroi2v: Zero-cost adaptation of pre-trained transformers from image to video. In: European Conference on Computer Vision, pp. 425\u2013443 . Springer","DOI":"10.1007\/978-3-031-73010-8_25"},{"key":"3002_CR52","doi-asserted-by":"crossref","unstructured":"He K, Fan H, Wu Y, Xie S, Girshick R (2020) Momentum contrast for unsupervised visual representation learning. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9729\u20139738","DOI":"10.1109\/CVPR42600.2020.00975"},{"key":"3002_CR53","doi-asserted-by":"crossref","unstructured":"Lin J, Gan C, Han S (2019) Tsm: Temporal shift module for efficient video understanding. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV)","DOI":"10.1109\/ICCV.2019.00718"},{"key":"3002_CR54","doi-asserted-by":"crossref","unstructured":"Patra S, Maitra A, Tiwari M, Kumaran K, Prabhu S, Punyeshwarananda S, Samanta S (2024) Hierarchical windowed graph attention network and a large scale dataset for isolated indian sign language recognition. arXiv preprint arXiv:2407.14224","DOI":"10.1007\/s10044-025-01529-3"},{"key":"3002_CR55","doi-asserted-by":"crossref","unstructured":"Yan S, Xiong Y, Lin D (2018) Spatial temporal graph convolutional networks for skeleton-based action recognition. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 32","DOI":"10.1609\/aaai.v32i1.12328"},{"key":"3002_CR56","doi-asserted-by":"crossref","unstructured":"Liu Z, Ning J, Cao Y, Wei Y, Zhang Z, Lin S, Hu H (2022) Video swin transformer. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3202\u20133211","DOI":"10.1109\/CVPR52688.2022.00320"}],"container-title":["International Journal of Machine Learning and Cybernetics"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-026-03002-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s13042-026-03002-x","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s13042-026-03002-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,21]],"date-time":"2026-03-21T06:12:02Z","timestamp":1774073522000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s13042-026-03002-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,3,21]]},"references-count":56,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2026,5]]}},"alternative-id":["3002"],"URL":"https:\/\/doi.org\/10.1007\/s13042-026-03002-x","relation":{"has-preprint":[{"id-type":"doi","id":"10.21203\/rs.3.rs-6259023\/v1","asserted-by":"object"}]},"ISSN":["1868-8071","1868-808X"],"issn-type":[{"value":"1868-8071","type":"print"},{"value":"1868-808X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,3,21]]},"assertion":[{"value":"19 March 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"9 January 2026","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"21 March 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"219"}}