{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,2,26]],"date-time":"2026-02-26T15:19:49Z","timestamp":1772119189226,"version":"3.50.1"},"reference-count":49,"publisher":"Springer Science and Business Media LLC","issue":"7","license":[{"start":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T00:00:00Z","timestamp":1731715200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T00:00:00Z","timestamp":1731715200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"national nature science foundation of china","doi-asserted-by":"crossref","award":["62106214"],"award-info":[{"award-number":["62106214"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"Provincial Key Laboratory Perfor mance Subsidy Project","award":["2567612H"],"award-info":[{"award-number":["2567612H"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2025,5]]},"DOI":"10.1007\/s00371-024-03702-x","type":"journal-article","created":{"date-parts":[[2024,11,16]],"date-time":"2024-11-16T09:10:22Z","timestamp":1731748222000},"page":"4991-5003","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":2,"title":["Selection and guidance: high-dimensional identity consistency preservation for face 
inpainting"],"prefix":"10.1007","volume":"41","author":[{"given":"Yaqian","family":"Li","sequence":"first","affiliation":[]},{"given":"Xin","family":"Zhan","sequence":"additional","affiliation":[]},{"given":"Haibin","family":"Li","sequence":"additional","affiliation":[]},{"given":"Wenming","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,11,16]]},"reference":[{"key":"3702_CR1","doi-asserted-by":"publisher","DOI":"10.1016\/j.eswa.2023.123111","volume":"245","author":"Y Chen","year":"2024","unstructured":"Chen, Y., Xia, R., Yang, K., Zou, K.: Micu: Image super-resolution via multi-level information compensation and u-net. Expert Syst. Appl. 245, 123111 (2024)","journal-title":"Expert Syst. Appl."},{"key":"3702_CR2","doi-asserted-by":"crossref","unstructured":"Ge, S.,\u00a0Li, J.,\u00a0Ye, Q.,\u00a0Luo, Z.: Detecting masked faces in the wild with lle-cnns, in: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2682\u20132690 (2017)","DOI":"10.1109\/CVPR.2017.53"},{"key":"3702_CR3","doi-asserted-by":"crossref","unstructured":"Yi, Z.,\u00a0Tang, Q.,\u00a0Azizi, S.,\u00a0Jang, D.,\u00a0Xu, Z.: Contextual residual aggregation for ultra high-resolution image inpainting. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition, pp. 7508\u20137517 (2020)","DOI":"10.1109\/CVPR42600.2020.00753"},{"key":"3702_CR4","doi-asserted-by":"crossref","unstructured":"He, K.,\u00a0Zhang, X.,\u00a0Ren, S.,\u00a0Sun, J.: Deep residual learning for image recognition, in: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 
770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"3702_CR5","doi-asserted-by":"publisher","DOI":"10.1016\/j.asoc.2024.111392","volume":"154","author":"Y Chen","year":"2024","unstructured":"Chen, Y., Xia, R., Yang, K., Zou, K.: Dnnam: Image inpainting algorithm via deep neural networks and attention mechanism. Appl. Soft Comput. 154, 111392 (2024)","journal-title":"Appl. Soft Comput."},{"key":"3702_CR6","unstructured":"Wang, Y.,\u00a0Tao, X.,\u00a0Qi, X.,\u00a0Shen, X.,\u00a0Jia, J.: Image inpainting via generative multi-column convolutional neural networks. Adv. Neural Inf. Process. Syst. 31 (2018)"},{"key":"3702_CR7","doi-asserted-by":"publisher","first-page":"95","DOI":"10.1016\/j.neucom.2020.12.118","volume":"437","author":"J Liu","year":"2021","unstructured":"Liu, J., Jung, C.: Facial image inpainting using attention-based multi-level generative network. Neurocomputing 437, 95\u2013106 (2021)","journal-title":"Neurocomputing"},{"key":"3702_CR8","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2021.108415","volume":"124","author":"X Zhang","year":"2022","unstructured":"Zhang, X., Wang, X., Shi, C., Yan, Z., Li, X., Kong, B., Lyu, S., Zhu, B., Lv, J., Yin, Y., et al.: De-gan: Domain embedded gan for high quality face image inpainting. Pattern Recogn. 124, 108415 (2022)","journal-title":"Pattern Recogn."},{"key":"3702_CR9","doi-asserted-by":"publisher","first-page":"2945","DOI":"10.1007\/s13042-023-01811-y","volume":"14","author":"Y Chen","year":"2023","unstructured":"Chen, Y., Xia, R., Zou, K., Yang, K.: Rnon: image inpainting via repair network and optimization network. Int. J. Mach. Learn. Cybern. 14, 2945\u20132961 (2023)","journal-title":"Int. J. Mach. Learn. Cybern."},{"key":"3702_CR10","doi-asserted-by":"publisher","DOI":"10.1016\/j.cviu.2023.103883","volume":"238","author":"Y Chen","year":"2024","unstructured":"Chen, Y., Xia, R., Yang, K., Zou, K.: Mfmam: Image inpainting via multi-scale feature module with attention module. Comput. Vis. 
Image Underst. 238, 103883 (2024)","journal-title":"Comput. Vis. Image Underst."},{"key":"3702_CR11","doi-asserted-by":"crossref","unstructured":"Yu, J.,\u00a0Lin, J.,\u00a0Yang, J.,\u00a0Shen, X.,\u00a0Lu, X., Huang, T.\u00a0S.: Generative image inpainting with contextual attention. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 5505\u20135514 (2018)","DOI":"10.1109\/CVPR.2018.00577"},{"key":"3702_CR12","doi-asserted-by":"publisher","first-page":"7991","DOI":"10.1109\/TII.2021.3064369","volume":"17","author":"MN Cheema","year":"2021","unstructured":"Cheema, M.N., Nazir, A., Yang, P., Sheng, B., Li, P., Li, H., Wei, X., Qin, J., Kim, J., Feng, D.D.: Modified gan-caed to minimize risk of unintentional liver major vessels cutting by controlled segmentation using cta\/spet-ct. IEEE Trans. Ind. Inf. 17, 7991\u20138002 (2021)","journal-title":"IEEE Trans. Ind. Inf."},{"key":"3702_CR13","doi-asserted-by":"publisher","first-page":"1333","DOI":"10.1109\/TMM.2023.3279993","volume":"26","author":"S Xiao","year":"2023","unstructured":"Xiao, S., Lan, G., Yang, J., Lu, W., Meng, Q., Gao, X.: Mcs-gan: a different understanding for generalization of deep forgery detection. IEEE Trans. Multimedia 26, 1333\u20131345 (2023)","journal-title":"IEEE Trans. Multimedia"},{"key":"3702_CR14","doi-asserted-by":"publisher","first-page":"3647","DOI":"10.1007\/s00371-023-02938-3","volume":"39","author":"S Huang","year":"2023","unstructured":"Huang, S., Liu, X., Tan, T., Hu, M., Wei, X., Chen, T., Sheng, B.: Transmrsr: transformer-based self-distilled generative prior for brain mri super-resolution. Vis. Comput. 39, 3647\u20133659 (2023)","journal-title":"Vis. Comput."},{"key":"3702_CR15","doi-asserted-by":"crossref","unstructured":"Wang, X.,\u00a0Li, Y.,\u00a0Zhang, H.,\u00a0Shan, Y.: Towards real-world blind face restoration with generative facial prior, in: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp. 
9168\u20139178 (2021)","DOI":"10.1109\/CVPR46437.2021.00905"},{"key":"3702_CR16","unstructured":"Nazeri, K.,\u00a0Ng, E.,\u00a0Joseph, T., Qureshi, F.\u00a0Z.,\u00a0Ebrahimi, M.: Edgeconnect: Generative image inpainting with adversarial edge learning, arXiv preprint arXiv:1901.00212 (2019)"},{"key":"3702_CR17","doi-asserted-by":"crossref","unstructured":"Yang, Y.,\u00a0Guo, X.: Generative landmark guided face inpainting. In: Pattern Recognition and Computer Vision: Third Chinese Conference, PRCV 2020, Nanjing, China, October 16\u201318, 2020, Proceedings, Part I 3, Springer, pp. 14\u201326 (2020)","DOI":"10.1007\/978-3-030-60633-6_2"},{"key":"3702_CR18","doi-asserted-by":"crossref","unstructured":"Yu, J.,\u00a0Lin, Z.,\u00a0Yang, J.,\u00a0Shen, X.,\u00a0Lu, X., Huang, T.\u00a0S.: Free-form image inpainting with gated convolution. In: Proceedings of the IEEE\/CVF international conference on computer vision. pp. 4471\u20134480 (2019)","DOI":"10.1109\/ICCV.2019.00457"},{"key":"3702_CR19","doi-asserted-by":"publisher","DOI":"10.1016\/j.compeleceng.2022.108136","volume":"101","author":"S Li","year":"2022","unstructured":"Li, S., Liu, Z., Wu, D., Huo, H., Wang, H., Zhang, K.: Low-resolution face recognition based on feature-mapping face hallucination. Comput. Electr. Eng. 101, 108136 (2022)","journal-title":"Comput. Electr. Eng."},{"key":"3702_CR20","first-page":"71","volume":"4","author":"X Wang","year":"2018","unstructured":"Wang, X., Wei, H., Gao, C., et al.: Identity preserving face completion with generative adversarial networks. Chin. J. Netw. Inf. Secur. 4, 71\u201376 (2018)","journal-title":"Chin. J. Netw. Inf. Secur."},{"key":"3702_CR21","doi-asserted-by":"crossref","unstructured":"Wang, Z.,\u00a0Tang, X.,\u00a0Luo, W.,\u00a0Gao, S.: Face aging with identity-preserved conditional generative adversarial networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 
7939\u20137947 (2018)","DOI":"10.1109\/CVPR.2018.00828"},{"key":"3702_CR22","unstructured":"Xu, Z.-Q. J.,\u00a0Zhang, Y.,\u00a0Luo, T.,\u00a0Xiao, Y.,\u00a0Ma, Z.: Frequency principle: Fourier analysis sheds light on deep neural networks, arXiv preprint arXiv:1901.06523 (2019)"},{"key":"3702_CR23","doi-asserted-by":"crossref","unstructured":"Jiang, L.,\u00a0Dai, B.,\u00a0Wu, W., Loy, C.\u00a0C.: Focal frequency loss for image reconstruction and synthesis. In: Proceedings of the IEEE\/CVF international conference on computer vision. pp. 13919\u201313929 (2021)","DOI":"10.1109\/ICCV48922.2021.01366"},{"key":"3702_CR24","doi-asserted-by":"crossref","unstructured":"Wang, H.,\u00a0Wu, X.,\u00a0Huang, Z., Xing, E.\u00a0P.: High-frequency component helps explain the generalization of convolutional neural networks. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp. 8684\u20138694 (2020)","DOI":"10.1109\/CVPR42600.2020.00871"},{"key":"3702_CR25","doi-asserted-by":"crossref","unstructured":"Liao, L.,\u00a0Xiao, J.,\u00a0Wang, Z., Lin, C.-W.,\u00a0Satoh, S.: Guidance and evaluation: semantic-aware image inpainting for mixed scenes. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part XXVII 16, Springer. pp. 683\u2013700 (2020)","DOI":"10.1007\/978-3-030-58583-9_41"},{"key":"3702_CR26","doi-asserted-by":"publisher","first-page":"1768","DOI":"10.1109\/TPAMI.2006.233","volume":"28","author":"L Grady","year":"2006","unstructured":"Grady, L.: Random walks for image segmentation. IEEE Trans. Pattern Anal. Mach. Intell. 28, 1768\u20131783 (2006)","journal-title":"IEEE Trans. Pattern Anal. Mach. 
Intell."},{"issue":"6","key":"3702_CR27","doi-asserted-by":"publisher","DOI":"10.1016\/j.patter.2022.100512","volume":"3","author":"R Liu","year":"2022","unstructured":"Liu, R., Wang, X., Wu, Q., Dai, L., Fang, X., Yan, T., Son, J., Tang, S., Li, J., Gao, Z., et al.: Deepdrid: diabetic retinopathy-grading and image quality estimation challenge. Patterns 3(6), 100512 (2022)","journal-title":"Patterns"},{"key":"3702_CR28","doi-asserted-by":"crossref","unstructured":"Ou, F.-Z.,\u00a0Chen, X.,\u00a0Zhang, R.,\u00a0Huang, Y.,\u00a0Li, S.,\u00a0Li, J.,\u00a0Li, Y.,\u00a0Cao, L., Wang, Y.-G.: Sdd-fiqa: unsupervised face image quality assessment with similarity distribution distance. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp. 7670\u20137679 (2021)","DOI":"10.1109\/CVPR46437.2021.00758"},{"key":"3702_CR29","doi-asserted-by":"crossref","unstructured":"Zhang, L.,\u00a0Chen, X.,\u00a0Tu, X.,\u00a0Wan, P.,\u00a0Xu, N.,\u00a0Ma, K.: Wavelet knowledge distillation: towards efficient image-to-image translation. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp 12464\u201312474 (2022)","DOI":"10.1109\/CVPR52688.2022.01214"},{"key":"3702_CR30","doi-asserted-by":"publisher","first-page":"3675","DOI":"10.1007\/s00371-023-02984-x","volume":"39","author":"AH Al-Jebrni","year":"2023","unstructured":"Al-Jebrni, A.H., Ali, S.G., Li, H., Lin, X., Li, P., Jung, Y., Kim, J., Feng, D.D., Sheng, B., Jiang, L., et al.: Sthy-net: a feature fusion-enhanced dense-branched modules network for small thyroid nodule classification from ultrasound images. Vis. Comput. 39, 3675\u20133689 (2023)","journal-title":"Vis. 
Comput."},{"key":"3702_CR31","doi-asserted-by":"publisher","first-page":"3051","DOI":"10.1007\/s11263-021-01515-2","volume":"129","author":"C Yu","year":"2021","unstructured":"Yu, C., Gao, C., Wang, J., Yu, G., Shen, C., Sang, N.: Bisenet v2: Bilateral network with guided aggregation for real-time semantic segmentation. Int. J. Comput. Vision 129, 3051\u20133068 (2021)","journal-title":"Int. J. Comput. Vision"},{"key":"3702_CR32","first-page":"16211","volume":"33","author":"G Nikolentzos","year":"2020","unstructured":"Nikolentzos, G., Vazirgiannis, M.: Random walk graph neural networks. Adv. Neural. Inf. Process. Syst. 33, 16211\u201316222 (2020)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"3702_CR33","doi-asserted-by":"crossref","unstructured":"Huang, R.,\u00a0Zhang, S.,\u00a0Li, T.,\u00a0He, R.: Beyond face rotation: Global and local perception gan for photorealistic and identity preserving frontal view synthesis. In: Proceedings of the IEEE international conference on computer vision. pp. 2439\u20132448 (2017)","DOI":"10.1109\/ICCV.2017.267"},{"key":"3702_CR34","doi-asserted-by":"crossref","unstructured":"Xu, K.,\u00a0Qin, M.,\u00a0Sun, F.,\u00a0Wang, Y., Chen, Y.-K.,\u00a0Ren, F.: Learning in the frequency domain. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp. 1740\u20131749 (2020)","DOI":"10.1109\/CVPR42600.2020.00181"},{"key":"3702_CR35","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.109741","volume":"143","author":"W Zhang","year":"2023","unstructured":"Zhang, W., Wang, Y., Ni, B., Yang, X.: Fully context-aware image inpainting with a learned semantic pyramid. Pattern Recognit. 
143, 109741 (2023)","journal-title":"Pattern Recognit."},{"key":"3702_CR36","doi-asserted-by":"crossref","unstructured":"Johnson, J.,\u00a0Alahi, A.,\u00a0Fei-Fei, L.: Perceptual losses for real-time style transfer and super-resolution, in: Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11\u201314, 2016, Proceedings, Part II 14, Springer. pp. 694\u2013711 (2016)","DOI":"10.1007\/978-3-319-46475-6_43"},{"key":"3702_CR37","doi-asserted-by":"crossref","unstructured":"Gatys, L.\u00a0A., Ecker,A.\u00a0S.,\u00a0Bethge, M.: Image style transfer using convolutional neural networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2414\u20132423 (2016)","DOI":"10.1109\/CVPR.2016.265"},{"key":"3702_CR38","doi-asserted-by":"crossref","unstructured":"Yang, C.,\u00a0Lu, X.,\u00a0Lin, Z.,\u00a0Shechtman, E.,\u00a0Wang, O.,\u00a0Li, H.: High-resolution image inpainting using multi-scale neural patch synthesis. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp 6721\u20136729 (2017)","DOI":"10.1109\/CVPR.2017.434"},{"key":"3702_CR39","doi-asserted-by":"crossref","unstructured":"Liu, G., Reda, F.\u00a0A., Shih, K.\u00a0J., Wang, T.-C.,\u00a0Tao, A.,\u00a0Catanzaro, B.: Image inpainting for irregular holes using partial convolutions. In: Proceedings of the European conference on computer vision (ECCV), pp. 
85\u2013100 (2018)","DOI":"10.1007\/978-3-030-01252-6_6"},{"key":"3702_CR40","unstructured":"Karras, T.,\u00a0Aila, T.,\u00a0Laine, S.,\u00a0Lehtinen, J.: Progressive growing of gans for improved quality, stability, and variation, arXiv preprint arXiv:1710.10196 (2017)"},{"key":"3702_CR41","doi-asserted-by":"publisher","first-page":"56028","DOI":"10.1109\/ACCESS.2019.2899940","volume":"7","author":"L Zhang","year":"2019","unstructured":"Zhang, L., Gui, G., Khattak, A.M., Wang, M., Gao, W., Jia, J.: Multi-task cascaded convolutional networks based intelligent fruit detection for designing automated robot. IEEE Access 7, 56028\u201356038 (2019)","journal-title":"IEEE Access"},{"key":"3702_CR42","doi-asserted-by":"crossref","unstructured":"Yang, C.,\u00a0Lu, X.,\u00a0Lin, Z.,\u00a0Shechtman, E.,\u00a0Wang, O.,\u00a0Li, H.: High-resolution image inpainting using multi-scale neural patch synthesis. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 6721\u20136729 (2017)","DOI":"10.1109\/CVPR.2017.434"},{"key":"3702_CR43","doi-asserted-by":"crossref","unstructured":"Yu, J.,\u00a0Lin, Z.,\u00a0Yang, J.,\u00a0Shen, X.,\u00a0Lu, X., Huang, T.\u00a0S.: Free-form image inpainting with gated convolution. In: Proceedings of the IEEE\/CVF international conference on computer vision. pp. 4471\u20134480 (2019a)","DOI":"10.1109\/ICCV.2019.00457"},{"key":"3702_CR44","doi-asserted-by":"crossref","unstructured":"Yu, J.,\u00a0Lin, Z.,\u00a0Yang, J.,\u00a0Shen, X.,\u00a0Lu, X., Huang, T.\u00a0S.: Free-form image inpainting with gated convolution. In: Proceedings of the IEEE\/CVF international conference on computer vision. pp. 4471\u20134480 (2019b)","DOI":"10.1109\/ICCV.2019.00457"},{"key":"3702_CR45","unstructured":"Heusel, M.,\u00a0Ramsauer, H.,\u00a0Unterthiner, T.,\u00a0Nessler, B.,\u00a0Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. Adv. Neural Inf. Process. Syst. 
30 (2017)"},{"key":"3702_CR46","doi-asserted-by":"crossref","unstructured":"Li, W.,\u00a0Lin, Z.,\u00a0Zhou, K.,\u00a0Qi, L.,\u00a0Wang, Y.,\u00a0Jia, J.: Mat: Mask-aware transformer for large hole image inpainting. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp. 10758\u201310768 (2022)","DOI":"10.1109\/CVPR52688.2022.01049"},{"key":"3702_CR47","doi-asserted-by":"crossref","unstructured":"Corneanu, C.,\u00a0Gadde, R., Martinez, A.\u00a0M.: Latentpaint: image inpainting in latent space with diffusion models. In: Proceedings of the IEEE\/CVF winter conference on applications of computer vision. pp. 4334\u20134343 (2024)","DOI":"10.1109\/WACV57701.2024.00428"},{"key":"3702_CR48","doi-asserted-by":"crossref","unstructured":"Zeng, Y.,\u00a0Fu, J.,\u00a0Chao, H.,\u00a0Guo, B.: Aggregated contextual transformations for high-resolution image inpainting. IEEE Trans Vis Comput Graph. (2022)","DOI":"10.1109\/TVCG.2022.3156949"},{"key":"3702_CR49","doi-asserted-by":"crossref","unstructured":"Li, J.,\u00a0Wang, N.,\u00a0Zhang, L.,\u00a0Du, B.,\u00a0Tao, D.: Recurrent feature reasoning for image inpainting. In: Proceedings of the IEEE\/CVF conference on computer vision and pattern recognition. pp. 
7760\u20137768 (2020)","DOI":"10.1109\/CVPR42600.2020.00778"}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-024-03702-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-024-03702-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-024-03702-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,4,24]],"date-time":"2025-04-24T06:01:40Z","timestamp":1745474500000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-024-03702-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,11,16]]},"references-count":49,"journal-issue":{"issue":"7","published-print":{"date-parts":[[2025,5]]}},"alternative-id":["3702"],"URL":"https:\/\/doi.org\/10.1007\/s00371-024-03702-x","relation":{"has-preprint":[{"id-type":"doi","id":"10.21203\/rs.3.rs-4274598\/v1","asserted-by":"object"}]},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"value":"0178-2789","type":"print"},{"value":"1432-2315","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,11,16]]},"assertion":[{"value":"21 October 2024","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"16 November 2024","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no conflict of interest to declare that are relevant to the content of this 
article.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}},{"value":"Not applicable.","order":4,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to participate"}},{"value":"Not applicable.","order":5,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent for publication"}}]}}