{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,8]],"date-time":"2026-05-08T16:18:59Z","timestamp":1778257139389,"version":"3.51.4"},"reference-count":59,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,5,20]],"date-time":"2025-05-20T00:00:00Z","timestamp":1747699200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,5,20]],"date-time":"2025-05-20T00:00:00Z","timestamp":1747699200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100017520","name":"Fakulta Informacn\u00edch Technologi\u00ed, Vysok\u00e9 Ucen\u00ed Technick\u00e9 v Brne","doi-asserted-by":"publisher","award":["FIT-S-23-8151"],"award-info":[{"award-number":["FIT-S-23-8151"]}],"id":[{"id":"10.13039\/501100017520","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["J Image Video Proc."],"DOI":"10.1186\/s13640-025-00670-7","type":"journal-article","created":{"date-parts":[[2025,5,20]],"date-time":"2025-05-20T15:20:40Z","timestamp":1747754440000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Reconstruction and enhancement techniques for overcoming occlusion in facial 
recognition"],"prefix":"10.1186","volume":"2025","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-1238-4062","authenticated-orcid":false,"given":"Filip","family":"Ple\u0161ko","sequence":"first","affiliation":[]},{"given":"Tom\u00e1\u0161","family":"Goldmann","sequence":"additional","affiliation":[]},{"given":"Kamil","family":"Malinka","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,5,20]]},"reference":[{"issue":"2","key":"670_CR1","doi-asserted-by":"publisher","first-page":"949","DOI":"10.1007\/s10462-017-9578-y","volume":"52","author":"B Lahasan","year":"2017","unstructured":"B. Lahasan, S.L. Lutfi, R. San-Segundo, A survey on techniques to handle face recognition challenges: occlusion, single sample per subject and expression. Artif. Intell. Rev 52(2), 949\u2013979 (2017). https:\/\/doi.org\/10.1007\/s10462-017-9578-y","journal-title":"Artif. Intell. Rev"},{"issue":"6","key":"670_CR2","doi-asserted-by":"publisher","first-page":"581","DOI":"10.1049\/bme2.12029","volume":"10","author":"D Zeng","year":"2021","unstructured":"D. Zeng, R. Veldhuis, L. Spreeuwers, A survey of face recognition techniques under occlusion. IET Biom 10(6), 581\u2013606 (2021)","journal-title":"IET Biom"},{"key":"670_CR3","doi-asserted-by":"publisher","unstructured":"Y. Zhao, J. Hu, X. Zhang, Face restoration based on gans and nst. In: Proceedings of the 2020 5th International Conference on Mathematics and Artificial Intelligence. ICMAI 2020, pp. 198\u2013203. Association for Computing Machinery, New York, NY, USA (2020). https:\/\/doi.org\/10.1145\/3395260.3395304","DOI":"10.1145\/3395260.3395304"},{"key":"670_CR4","doi-asserted-by":"crossref","unstructured":"X. Hong, P. Xiong, R. Ji, H. Fan, Deep Fusion Network for Image Completion (2019)","DOI":"10.1145\/3343031.3351002"},{"key":"670_CR5","doi-asserted-by":"publisher","unstructured":"Y. Li, S. Liu, J. Yang, M.-H. Yang, Generative face completion. 
In: 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 5892\u20135900 (2017). https:\/\/doi.org\/10.1109\/CVPR.2017.624","DOI":"10.1109\/CVPR.2017.624"},{"issue":"11","key":"670_CR6","doi-asserted-by":"publisher","first-page":"3204","DOI":"10.3390\/s20113204","volume":"20","author":"SN Uddin","year":"2020","unstructured":"S.N. Uddin, Y.J. Jung, Global and local attention-based free-form image inpainting. Sensors 20(11), 3204 (2020)","journal-title":"Sensors"},{"key":"670_CR7","doi-asserted-by":"crossref","unstructured":"R. Suvorov, E. Logacheva, A. Mashikhin, A. Remizova, A. Ashukha, A. Silvestrov, N. Kong, H. Goka, K. Park, V. Lempitsky, Resolution-robust large mask inpainting with fourier convolutions. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 2149\u20132159 (2022)","DOI":"10.1109\/WACV51458.2022.00323"},{"key":"670_CR8","doi-asserted-by":"crossref","unstructured":"A. Hore, D. Ziou, Image quality metrics: Psnr vs. ssim. In: 2010 20th International Conference on Pattern Recognition, pp. 2366\u20132369 (2010). IEEE","DOI":"10.1109\/ICPR.2010.579"},{"key":"670_CR9","doi-asserted-by":"publisher","first-page":"1398","DOI":"10.1109\/ACSSC.2003.1292216","volume":"2","author":"Z Wang","year":"2003","unstructured":"Z. Wang, E. Simoncelli, A. Bovik, Multis. Struct. Sim. Imag. Qual. Assess. 2, 1398\u20131402 (2003). https:\/\/doi.org\/10.1109\/ACSSC.2003.1292216","journal-title":"Multis. Struct. Sim. Imag. Qual. Assess."},{"key":"670_CR10","doi-asserted-by":"publisher","unstructured":"F. Ple\u0161ko, T. Goldmann, K. Malinka, Facial image reconstruction and its influence to face recognition. In: 2023 International Conference of the Biometrics Special Interest Group (BIOSIG), pp. 1\u20135 (2023). https:\/\/doi.org\/10.1109\/BIOSIG58226.2023.10346000","DOI":"10.1109\/BIOSIG58226.2023.10346000"},{"key":"670_CR11","doi-asserted-by":"publisher","unstructured":"X. Gao, M. Nguyen, W.Q. 
Yan, Face image inpainting based on generative adversarial network. In: 2021 36th International Conference on Image and Vision Computing New Zealand (IVCNZ), pp. 1\u20136 (2021). https:\/\/doi.org\/10.1109\/IVCNZ54163.2021.9653347","DOI":"10.1109\/IVCNZ54163.2021.9653347"},{"key":"670_CR12","doi-asserted-by":"crossref","unstructured":"M.N. Ye\u011fin, M.F. Amasyal\u0131, Theoretical research on generative diffusion models: an overview. arXiv preprint arXiv:2404.09016 (2024)","DOI":"10.2139\/ssrn.4627329"},{"key":"670_CR13","doi-asserted-by":"crossref","unstructured":"X. Guo, H. Yang, D. Huang, Image inpainting via conditional texture and structure dual generation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 14134\u201314143 (2021)","DOI":"10.1109\/ICCV48922.2021.01387"},{"key":"670_CR14","unstructured":"H.-W. Dong, Y.-H. Yang, Towards a deeper understanding of adversarial losses under a discriminative adversarial network setting. arXiv preprint arXiv:1901.08753 (2019)"},{"issue":"7","key":"670_CR15","doi-asserted-by":"publisher","first-page":"3266","DOI":"10.1109\/TVCG.2022.3156949","volume":"29","author":"Y Zeng","year":"2022","unstructured":"Y. Zeng, J. Fu, H. Chao, B. Guo, Aggregated contextual transformations for high-resolution image inpainting. IEEE Trans. Vis. Comput. Gr. 29(7), 3266\u20133280 (2022)","journal-title":"IEEE Trans. Vis. Comput. Gr."},{"key":"670_CR16","unstructured":"S. Zhou, K.C.K. Chan, C. Li, C.C. Loy, Towards Robust Blind Face Restoration with Codebook Lookup Transformer (2022)"},{"key":"670_CR17","doi-asserted-by":"crossref","unstructured":"Z. Yue, C.C. Loy, DifFace: Blind Face Restoration with Diffused Error Contraction (2023)","DOI":"10.1109\/TPAMI.2024.3432651"},{"key":"670_CR18","doi-asserted-by":"crossref","unstructured":"X. Wang, Y. Li, H. Zhang, Y. 
Shan, Towards Real-World Blind Face Restoration with Generative Facial Prior (2021)","DOI":"10.1109\/CVPR46437.2021.00905"},{"key":"670_CR19","doi-asserted-by":"crossref","unstructured":"X. Li, C. Chen, S. Zhou, X. Lin, W. Zuo, L. Zhang, Blind Face Restoration via Deep Multi-scale Component Dictionaries (2020)","DOI":"10.1007\/978-3-030-58545-7_23"},{"key":"670_CR20","doi-asserted-by":"crossref","unstructured":"T. Karras, S. Laine, T. Aila, A Style-Based Generator Architecture for Generative Adversarial Networks (2019)","DOI":"10.1109\/CVPR.2019.00453"},{"key":"670_CR21","unstructured":"A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A.N. Gomez, L. Kaiser, I. Polosukhin, Attention Is All You Need (2023)"},{"key":"670_CR22","unstructured":"A. Tamkin, M. Taufeeque, N.D. Goodman, Codebook Features: Sparse and Discrete Interpretability for Neural Networks (2023)"},{"issue":"1","key":"670_CR23","first-page":"9","volume":"1","author":"Gao Huang","year":"2019","unstructured":"Gao Huang, Hua Lan, Deep learning for super-resolution in a field emission scanning electron microscope. Ai 1(1), 9 (2019)","journal-title":"Ai"},{"key":"670_CR24","doi-asserted-by":"crossref","unstructured":"Y. Taigman, M. Yang, M. Ranzato, L. Wolf, Deepface: Closing the gap to human-level performance in face verification. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1701\u20131708 (2014)","DOI":"10.1109\/CVPR.2014.220"},{"key":"670_CR25","doi-asserted-by":"crossref","unstructured":"F. Schroff, D. Kalenichenko, J. Philbin, Facenet: A unified embedding for face recognition and clustering. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 815\u2013823 (2015)","DOI":"10.1109\/CVPR.2015.7298682"},{"key":"670_CR26","doi-asserted-by":"crossref","unstructured":"W. Liu, Y. Wen, Z. Yu, M. Li, B. Raj, L. Song, Sphereface: Deep hypersphere embedding for face recognition. 
In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 212\u2013220 (2017)","DOI":"10.1109\/CVPR.2017.713"},{"issue":"7","key":"670_CR27","doi-asserted-by":"publisher","first-page":"926","DOI":"10.1109\/LSP.2018.2822810","volume":"25","author":"F Wang","year":"2018","unstructured":"F. Wang, J. Cheng, W. Liu, H. Liu, Additive margin softmax for face verification. IEEE Signal Process. Lett. 25(7), 926\u2013930 (2018)","journal-title":"IEEE Signal Process. Lett."},{"key":"670_CR28","doi-asserted-by":"crossref","unstructured":"H. Wang, Y. Wang, Z. Zhou, X. Ji, D. Gong, J. Zhou, Z. Li, W. Liu, Cosface: Large margin cosine loss for deep face recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5265\u20135274 (2018)","DOI":"10.1109\/CVPR.2018.00552"},{"issue":"10","key":"670_CR29","doi-asserted-by":"publisher","first-page":"5962","DOI":"10.1109\/tpami.2021.3087709","volume":"44","author":"J Deng","year":"2022","unstructured":"J. Deng, J. Guo, J. Yang, N. Xue, I. Kotsia, S. Zafeiriou, Arcface: Additive angular margin loss for deep face recognition. IEEE Trans. Pattern Anal. Mach. Intell. 44(10), 5962\u20135979 (2022). https:\/\/doi.org\/10.1109\/tpami.2021.3087709","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"670_CR30","doi-asserted-by":"publisher","first-page":"2587","DOI":"10.1109\/TIP.2020.3048632","volume":"30","author":"Y Zhong","year":"2021","unstructured":"Y. Zhong, W. Deng, J. Hu, D. Zhao, X. Li, D. Wen, Sface: Sigmoid-constrained hypersphere loss for robust face recognition. IEEE Trans. Image Process. 30, 2587\u20132598 (2021)","journal-title":"IEEE Trans. Image Process."},{"key":"670_CR31","doi-asserted-by":"crossref","unstructured":"Q. Meng, S. Zhao, Z. Huang, F. Zhou, Magface: A universal representation for face recognition and quality assessment. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 
14225\u201314234 (2021)","DOI":"10.1109\/CVPR46437.2021.01400"},{"key":"670_CR32","first-page":"7","volume":"1","author":"GB Huang","year":"2007","unstructured":"G.B. Huang, M. Ramesh, T. Berg, E. Learned-Miller, Labeled faces in the wild: A database for studying face recognition in unconstrained environments. Tech. Rep. 1, 7\u201349 (2007)","journal-title":"Tech. Rep."},{"key":"670_CR33","doi-asserted-by":"crossref","unstructured":"S. Sengupta, J.C. Cheng, C.D. Castillo, V.M. Patel, R. Chellappa, D.W. Jacobs, Frontal to profile face verification in the wild. In: IEEE Conference on Applications of Computer Vision (2016)","DOI":"10.1109\/WACV.2016.7477558"},{"key":"670_CR34","doi-asserted-by":"crossref","unstructured":"S. Moschoglou, A. Papaioannou, C. Sagonas, J. Deng, I. Kotsia, S. Zafeiriou, Agedb: the first manually collected, in-the-wild age database. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshop, 2017; 2 5","DOI":"10.1109\/CVPRW.2017.250"},{"key":"670_CR35","first-page":"5","volume":"5","author":"T Zheng","year":"2018","unstructured":"T. Zheng, W. Deng, Cross-pose lfw: A database for studying cross-pose face recognition in unconstrained environments. Tech. Rep. 5, 5 (2018)","journal-title":"Tech. Rep."},{"key":"670_CR36","unstructured":"P. Terh\u00f6rst, M. Ihlefeld, M. Huber, N. Damer, F. Kirchbuchner, K. Raja, A. Kuijper, QMagFace: Simple and accurate quality-aware face recognition. CoRR arXiv:abs\/2111.13475 (2021)"},{"issue":"3","key":"670_CR37","first-page":"471","volume":"10","author":"CR Kavita","year":"2022","unstructured":"C.R. Kavita, Face recognition challenges and solutions using machine learning. Int. J. Intell. Syst. Appl. Eng. 10(3), 471\u2013476 (2022)","journal-title":"Int. J. Intell. Syst. Appl. Eng."},{"key":"670_CR38","unstructured":"A. Radford, L. Metz, S. 
Chintala, Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks (2016)"},{"issue":"2","key":"670_CR39","doi-asserted-by":"publisher","first-page":"89","DOI":"10.37868\/hsd.v3i2.71","volume":"3","author":"M Tripathi","year":"2021","unstructured":"M. Tripathi, Facial image denoising using autoencoder and unet. Herit. Sustain. Dev 3(2), 89\u201396 (2021). https:\/\/doi.org\/10.37868\/hsd.v3i2.71","journal-title":"Herit. Sustain. Dev"},{"key":"670_CR40","unstructured":"J.T. Springenberg, A. Dosovitskiy, T. Brox, M. Riedmiller, Striving for Simplicity: The All Convolutional Net (2015)"},{"key":"670_CR41","doi-asserted-by":"publisher","DOI":"10.5281\/zenodo.5449801","author":"Y Sha","year":"2021","unstructured":"Y. Sha, Keras-unet-collection. GitHub (2021). https:\/\/doi.org\/10.5281\/zenodo.5449801","journal-title":"GitHub"},{"issue":"4","key":"670_CR42","doi-asserted-by":"publisher","first-page":"588","DOI":"10.1109\/JAS.2017.7510583","volume":"4","author":"K Wang","year":"2017","unstructured":"K. Wang, C. Gou, Y. Duan, Y. Lin, X. Zheng, F.-Y. Wang, Generative adversarial networks: introduction and outlook. IEEE\/CAA J. Autom. Sinica 4(4), 588\u2013598 (2017)","journal-title":"IEEE\/CAA J. Autom. Sinica"},{"key":"670_CR43","unstructured":"I.J. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, Y. Bengio, Generative Adversarial Networks (2014)"},{"key":"670_CR44","doi-asserted-by":"crossref","unstructured":"Z. Liu, P. Luo, X. Wang, X. Tang, Deep learning face attributes in the wild. In: Proceedings of International Conference on Computer Vision (ICCV) (2015)","DOI":"10.1109\/ICCV.2015.425"},{"key":"670_CR45","unstructured":"G. Liu, F.A. Reda, K.J. Shih, T.-C. Wang, A. Tao, B. Catanzaro, Nvidia irregular mask dataset. (2018). https:\/\/nv-adlr.github.io\/publication\/partialconv-inpainting"},{"key":"670_CR46","doi-asserted-by":"crossref","unstructured":"R. Zhang, P. Isola, A.A. Efros, E. 
Shechtman, O. Wang, The unreasonable effectiveness of deep features as a perceptual metric. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 586\u2013595 (2018)","DOI":"10.1109\/CVPR.2018.00068"},{"issue":"8","key":"670_CR47","doi-asserted-by":"publisher","first-page":"2378","DOI":"10.1109\/TIP.2011.2109730","volume":"20","author":"L Zhang","year":"2011","unstructured":"L. Zhang, L. Zhang, X. Mou, D. Zhang, Fsim: A feature similarity index for image quality assessment. IEEE Trans. Imag. Process. 20(8), 2378\u20132386 (2011)","journal-title":"IEEE Trans. Imag. Process."},{"key":"670_CR48","unstructured":"Z. Wang, E.P. Simoncelli, A.C. Bovik, Multiscale structural similarity for image quality assessment. In: The Thrity-Seventh Asilomar Conference on Signals, Systems & Computers, 2003; 2; 1398\u20131402"},{"key":"670_CR49","first-page":"1","volume":"30","author":"M Heusel","year":"2017","unstructured":"M. Heusel, H. Ramsauer, T. Unterthiner, B. Nessler, S. Hochreiter, Gans trained by a two time-scale update rule converge to a local nash equilibrium. Adv. Neural. Inf. Process. Syst. 30, 1 (2017)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"670_CR50","doi-asserted-by":"crossref","unstructured":"A. Hore, D. Ziou, Image quality metrics: Psnr vs. ssim. In: 2010 20th International Conference on Pattern Recognition, pp. 2366\u20132369 (2010). IEEE","DOI":"10.1109\/ICPR.2010.579"},{"key":"670_CR51","doi-asserted-by":"crossref","unstructured":"J. Deng, J. Guo, Y. Zhou, J. Yu, I. Kotsia, S. Zafeiriou, Retinaface: Single-stage dense face localisation in the wild. arXiv preprint arXiv:1905.00641 (2019)","DOI":"10.1109\/CVPR42600.2020.00525"},{"key":"670_CR52","doi-asserted-by":"crossref","unstructured":"K. He, X. Zhang, S. Ren, J. Sun, Deep Residual Learning for Image Recognition (2015)","DOI":"10.1109\/CVPR.2016.90"},{"key":"670_CR53","unstructured":"I.C. Duta, L. Liu, F. Zhu, L. 
Shao, Improved Residual Networks for Image and Video Recognition (2020)"},{"key":"670_CR54","doi-asserted-by":"crossref","unstructured":"B. Adhikari, X. Ni, E. Rahtu, H. Huttunen, Towards a real-time facial analysis system. In: 2021 IEEE 23rd International Workshop on Multimedia Signal Processing (MMSP), pp. 1\u20136 (2021). IEEE","DOI":"10.1109\/MMSP53017.2021.9733663"},{"key":"670_CR55","doi-asserted-by":"crossref","unstructured":"H. Cao, Y. Wang, J. Chen, D. Jiang, X. Zhang, Q. Tian, M. Wang, Swin-unet: Unet-like pure transformer for medical image segmentation. In: European Conference on Computer Vision, pp. 205\u2013218 (2022). Springer","DOI":"10.1007\/978-3-031-25066-8_9"},{"key":"670_CR56","doi-asserted-by":"crossref","unstructured":"H. Huang, L. Lin, R. Tong, H. Hu, Q. Zhang, Y. Iwamoto, X. Han, Y.-W. Chen, J. Wu, Unet 3+: A full-scale connected unet for medical image segmentation. In: ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1055\u20131059 (2020). IEEE","DOI":"10.1109\/ICASSP40776.2020.9053405"},{"key":"670_CR57","doi-asserted-by":"crossref","unstructured":"O. Ronneberger, P. Fischer, T. Brox, U-net: Convolutional networks for biomedical image segmentation. In: Medical Image Computing and Computer-assisted intervention\u2013MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18, pp. 234\u2013241 (2015). Springer","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"670_CR58","unstructured":"F. Milletari, N. Navab, S.-A. Ahmadi, V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation (2016). https:\/\/arxiv.org\/abs\/1606.04797"},{"key":"670_CR59","doi-asserted-by":"crossref","unstructured":"Z. Zhou, M.M. Rahman\u00a0Siddiquee, N. Tajbakhsh, J. Liang, Unet++: A nested u-net architecture for medical image segmentation. 
In: Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support: 4th International Workshop, DLMIA 2018, and 8th International Workshop, ML-CDS 2018, Held in Conjunction with MICCAI 2018, Granada, Spain, September 20, 2018, Proceedings 4, pp. 3\u201311 (2018). Springer","DOI":"10.1007\/978-3-030-00889-5_1"}],"container-title":["EURASIP Journal on Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1186\/s13640-025-00670-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1186\/s13640-025-00670-7\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1186\/s13640-025-00670-7.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,5,20]],"date-time":"2025-05-20T15:20:53Z","timestamp":1747754453000},"score":1,"resource":{"primary":{"URL":"https:\/\/jivp-eurasipjournals.springeropen.com\/articles\/10.1186\/s13640-025-00670-7"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,5,20]]},"references-count":59,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2025,12]]}},"alternative-id":["670"],"URL":"https:\/\/doi.org\/10.1186\/s13640-025-00670-7","relation":{"has-preprint":[{"id-type":"doi","id":"10.21203\/rs.3.rs-4349727\/v1","asserted-by":"object"}]},"ISSN":["1687-5281"],"issn-type":[{"value":"1687-5281","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,5,20]]},"assertion":[{"value":"30 April 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 May 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article 
History"}},{"value":"20 May 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Competing interests"}}],"article-number":"9"}}