{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,1]],"date-time":"2026-04-01T17:59:18Z","timestamp":1775066358045,"version":"3.50.1"},"reference-count":62,"publisher":"Springer Science and Business Media LLC","issue":"4","license":[{"start":{"date-parts":[[2025,3,8]],"date-time":"2025-03-08T00:00:00Z","timestamp":1741392000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"},{"start":{"date-parts":[[2025,3,8]],"date-time":"2025-03-08T00:00:00Z","timestamp":1741392000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0"}],"funder":[{"DOI":"10.13039\/501100004479","name":"Natural Science Foundation of Jiangxi Province","doi-asserted-by":"publisher","award":["20224BAB212013"],"award-info":[{"award-number":["20224BAB212013"]}],"id":[{"id":"10.13039\/501100004479","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100004479","name":"Natural Science Foundation of Jiangxi Province","doi-asserted-by":"publisher","award":["20224BAB212008"],"award-info":[{"award-number":["20224BAB212008"]}],"id":[{"id":"10.13039\/501100004479","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62266020"],"award-info":[{"award-number":["62266020"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100019037","name":"Key Science and Technology Research Project in Jiangxi Province Department of Education","doi-asserted-by":"publisher","award":["GJJ2200830"],"award-info":[{"award-number":["GJJ2200830"]}],"id":[{"id":"10.13039\/501100019037","id-type":"DOI","asserted-by":"publisher"}]},{"name":"Jiangxi Provincial Key Laboratory of Multidimensional Intelligent Perception and Control of 
China","award":["2024SSY03161"],"award-info":[{"award-number":["2024SSY03161"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Complex Intell. Syst."],"published-print":{"date-parts":[[2025,4]]},"DOI":"10.1007\/s40747-025-01796-x","type":"journal-article","created":{"date-parts":[[2025,3,8]],"date-time":"2025-03-08T06:31:41Z","timestamp":1741415501000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Decoupled pixel-wise correction for abdominal multi-organ segmentation"],"prefix":"10.1007","volume":"11","author":[{"ORCID":"https:\/\/orcid.org\/0000-0001-6206-450X","authenticated-orcid":false,"given":"Xiangchun","family":"Yu","sequence":"first","affiliation":[]},{"given":"Longjun","family":"Ding","sequence":"additional","affiliation":[]},{"given":"Dingwen","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0001-7198-4199","authenticated-orcid":false,"given":"Jianqing","family":"Wu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-4289-7114","authenticated-orcid":false,"given":"Miaomiao","family":"Liang","sequence":"additional","affiliation":[]},{"given":"Jian","family":"Zheng","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1761-6659","authenticated-orcid":false,"given":"Wei","family":"Pang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,3,8]]},"reference":[{"key":"1796_CR1","unstructured":"Vaswani A et al (2017) Attention is all you need. In: Advances in neural information processing systems, vol. 30"},{"key":"1796_CR2","unstructured":"Bahdanau D, Cho K, Bengio Y (2014) Neural machine translation by jointly learning to align and translate. 
arXiv preprint arXiv:1409.0473"},{"key":"1796_CR3","unstructured":"Devlin J, Chang M-W, Lee K, Toutanova K (2018) Bert: pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805"},{"key":"1796_CR4","doi-asserted-by":"crossref","unstructured":"Shaw P, Uszkoreit J, Vaswani A (2018) Self-attention with relative position representations. arXiv preprint arXiv:1803.02155","DOI":"10.18653\/v1\/N18-2074"},{"key":"1796_CR5","unstructured":"Shen Z, Zhang M, Zhao H, Yi S, Li H (2021) Efficient attention: Attention with linear complexities. In: Proceedings of the IEEE\/CVF winter Conference on applications of computer vision, 2021, pp. 3531\u20133539"},{"key":"1796_CR6","doi-asserted-by":"crossref","unstructured":"Woo S, Park J, Lee J-Y, Kweon IS (2018) Cbam: Convolutional block attention module. In: Proceedings of the European Conference on computer vision (ECCV), 2018, pp. 3\u201319","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"1796_CR7","doi-asserted-by":"crossref","unstructured":"Wang X, Girshick R, Gupta A, He K (2018) Non-local neural networks. In: Proceedings of the IEEE Conference on computer vision and pattern recognition, 2018, pp. 7794\u20137803","DOI":"10.1109\/CVPR.2018.00813"},{"key":"1796_CR8","doi-asserted-by":"crossref","unstructured":"Wang F et al (2017) Residual attention network for image classification. In: Proceedings of the IEEE Conference on computer vision and pattern recognition, 2017, pp. 3156\u20133164","DOI":"10.1109\/CVPR.2017.683"},{"key":"1796_CR9","unstructured":"Oktay O et al (2018) Attention u-net: Learning where to look for the pancreas. arXiv preprint arXiv:1804.03999"},{"key":"1796_CR10","doi-asserted-by":"crossref","unstructured":"Ni Z-L et al (2019) Raunet: residual attention u-net for semantic segmentation of cataract surgical instruments. In: International Conference on neural information processing, Springer, 2019, pp. 
139\u2013149","DOI":"10.1007\/978-3-030-36711-4_13"},{"key":"#cr-split#-1796_CR11.1","doi-asserted-by":"crossref","unstructured":"Cai Y, Wang Y (2022) Ma-unet: an improved version of unet based on multi-scale and attention mechanism for medical image segmentation. In: Third International Conference on Electronics and Communication","DOI":"10.1117\/12.2628519"},{"key":"#cr-split#-1796_CR11.2","unstructured":"Network and Computer Technology (ECNCT 2021), SPIE, 2022, pp. 205-211"},{"key":"1796_CR12","unstructured":"Chen J et al (2021) Transunet: transformers make strong encoders for medical image segmentation. arXiv preprint arXiv:2102.04306"},{"key":"1796_CR13","doi-asserted-by":"crossref","unstructured":"Hatamizadeh A, Nath V, Tang Y, Yang D, Roth HR, Xu D (2021) Swin unetr: Swin transformers for semantic segmentation of brain tumors in mri images. In: International MICCAI Brainlesion Workshop, Springer, 2021, pp. 272\u2013284","DOI":"10.1007\/978-3-031-08999-2_22"},{"issue":"5","key":"1796_CR14","doi-asserted-by":"crossref","first-page":"1484","DOI":"10.1109\/TMI.2022.3230943","volume":"42","author":"X Huang","year":"2022","unstructured":"Huang X, Deng Z, Li D, Yuan X, Fu Y (2022) Missformer: An effective transformer for 2d medical image segmentation. IEEE Trans Med Imaging 42(5):1484\u20131494","journal-title":"IEEE Trans Med Imaging"},{"key":"1796_CR15","doi-asserted-by":"crossref","unstructured":"Roy S et al (2023) Mednext: transformer-driven scaling of convnets for medical image segmentation. In: International Conference on medical image computing and computer-assisted intervention, Springer, 2023, pp. 405\u2013415","DOI":"10.1007\/978-3-031-43901-8_39"},{"issue":"3","key":"1796_CR16","doi-asserted-by":"crossref","first-page":"331","DOI":"10.1007\/s41095-022-0271-y","volume":"8","author":"M-H Guo","year":"2022","unstructured":"Guo M-H et al (2022) Attention mechanisms in computer vision: a survey. 
Comput Vis Media 8(3):331\u2013368","journal-title":"Comput Vis Media"},{"key":"1796_CR17","doi-asserted-by":"crossref","unstructured":"Ye X, He Z, Heng W, Li Y (2023) Toward understanding the effectiveness of attention mechanism. AIP Adv 13(3)","DOI":"10.1063\/5.0141666"},{"key":"1796_CR18","unstructured":"Jain S, Wallace BC (2019) Attention is not explanation. arXiv preprint arXiv:1902.10186"},{"key":"1796_CR19","unstructured":"Serrano S, Smith NA (2019) Is attention interpretable? arXiv preprint arXiv:1906.03731"},{"key":"1796_CR20","doi-asserted-by":"crossref","unstructured":"Ionescu C, Vantzos O, Sminchisescu C (2015) Matrix backpropagation for deep networks with structured layers. In: Proceedings of the IEEE International Conference on computer vision, 2015, pp. 2965\u20132973","DOI":"10.1109\/ICCV.2015.339"},{"key":"1796_CR21","doi-asserted-by":"crossref","unstructured":"Wall ME, Rechtsteiner A, Rocha LM (2003) Singular value decomposition and principal component analysis. In: A practical approach to microarray data analysis, pp. 91\u2013109","DOI":"10.1007\/0-306-47815-3_5"},{"key":"1796_CR22","doi-asserted-by":"crossref","unstructured":"Hu J, Shen L, Sun G (2018) Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on computer vision and pattern recognition, 2018, pp. 7132\u20137141","DOI":"10.1109\/CVPR.2018.00745"},{"key":"1796_CR23","doi-asserted-by":"crossref","unstructured":"Cao Y, Xu J, Lin S, Wei F, Hu H (2019) Gcnet: non-local networks meet squeeze-excitation networks and beyond. In: Proceedings of the IEEE\/CVF International Conference on computer vision workshops, 2019, pp. 0\u20130","DOI":"10.1109\/ICCVW.2019.00246"},{"key":"1796_CR24","unstructured":"Dosovitskiy A et al (2020) An image is worth 16x16 words: transformers for image recognition at scale. 
arXiv preprint arXiv:2010.11929"},{"key":"1796_CR25","doi-asserted-by":"crossref","unstructured":"Liu Z et al (2021) Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on computer vision, 2021, pp. 10012\u201310022","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1796_CR26","first-page":"9355","volume":"34","author":"X Chu","year":"2021","unstructured":"Chu X et al (2021) Twins: revisiting the design of spatial attention in vision transformers. Adv Neural Inf Process Syst 34:9355\u20139366","journal-title":"Adv Neural Inf Process Syst"},{"key":"1796_CR27","unstructured":"Geng Z, Guo M-H, Chen H, Li X, Wei K, Lin Z (2021) Is attention better than matrix decomposition? arXiv preprint arXiv:2109.04553"},{"key":"1796_CR28","unstructured":"Lee D, Seung HS (2000) Algorithms for non-negative matrix factorization. In: Advances in neural information processing systems, vol. 13"},{"issue":"6","key":"1796_CR29","doi-asserted-by":"crossref","first-page":"2325","DOI":"10.1109\/18.720541","volume":"44","author":"RM Gray","year":"1998","unstructured":"Gray RM, Neuhoff DL (1998) Quantization. IEEE Trans Inf Theory 44(6):2325\u20132383","journal-title":"IEEE Trans Inf Theory"},{"key":"1796_CR30","doi-asserted-by":"crossref","first-page":"143","DOI":"10.1023\/A:1007612920971","volume":"42","author":"IS Dhillon","year":"2001","unstructured":"Dhillon IS, Modha DS (2001) Concept decompositions for large sparse text data using clustering. Mach Learn 42:143\u2013175","journal-title":"Mach Learn"},{"issue":"11","key":"1796_CR31","doi-asserted-by":"crossref","first-page":"2514","DOI":"10.1109\/TMI.2018.2837502","volume":"37","author":"O Bernard","year":"2018","unstructured":"Bernard O et al (2018) Deep learning techniques for automatic MRI cardiac multi-structures segmentation and diagnosis: is the problem solved? 
IEEE Trans Med Imaging 37(11):2514\u20132525","journal-title":"IEEE Trans Med Imaging"},{"issue":"8","key":"1796_CR32","doi-asserted-by":"crossref","first-page":"10651","DOI":"10.1109\/TNNLS.2023.3243241","volume":"35","author":"Y Song","year":"2023","unstructured":"Song Y, Teoh JY-C, Choi K-S, Qin J (2023) Dynamic loss weighting for multiorgan segmentation in medical images. IEEE Trans Neural Netw Learn Syst 35(8):10651\u201310662","journal-title":"IEEE Trans Neural Netw Learn Syst"},{"key":"1796_CR33","unstructured":"Park J, Woo S, Lee J-Y, Kweon IS (2018) Bam: bottleneck attention module. arXiv preprint arXiv:1807.06514"},{"key":"1796_CR34","doi-asserted-by":"crossref","unstructured":"Liu H, Liu F, Fan X, Huang D (2021) Polarized self-attention: towards high-quality pixel-wise regression. arXiv preprint arXiv:2107.00782","DOI":"10.1016\/j.neucom.2022.07.054"},{"key":"1796_CR35","unstructured":"Xu K et al (2015) Show, attend and tell: Neural image caption generation with visual attention. In: International Conference on machine learning, PMLR, 2015, pp. 2048\u20132057"},{"key":"1796_CR36","doi-asserted-by":"crossref","unstructured":"Dai J et al (2017) Deformable convolutional networks. In: Proceedings of the IEEE International conference on computer vision, 2017, pp. 764\u2013773","DOI":"10.1109\/ICCV.2017.89"},{"key":"1796_CR37","doi-asserted-by":"crossref","unstructured":"Roy AG, Navab N, Wachinger C (2018) Concurrent spatial and channel \u2018squeeze & excitation\u2019 in fully convolutional networks. In: Medical Image Computing and Computer Assisted Intervention\u2013MICCAI 2018: 21st International Conference, Granada, Spain, September 16-20, 2018, Proceedings, Part I, Springer, 2018, pp. 421\u2013429","DOI":"10.1007\/978-3-030-00928-1_48"},{"key":"1796_CR38","doi-asserted-by":"crossref","unstructured":"Selvaraju RR, Cogswell M, Das A, Vedantam R, Parikh D, Batra D (2017) Grad-cam: visual explanations from deep networks via gradient-based localization. 
In: Proceedings of the IEEE International Conference on computer vision, 2017, pp. 618\u2013626","DOI":"10.1109\/ICCV.2017.74"},{"key":"1796_CR39","doi-asserted-by":"crossref","first-page":"211","DOI":"10.1016\/j.patcog.2016.11.008","volume":"65","author":"G Montavon","year":"2017","unstructured":"Montavon G, Lapuschkin S, Binder A, Samek W, M\u00fcller K-R (2017) Explaining nonlinear classification decisions with deep taylor decomposition. Pattern Recogn 65:211\u2013222","journal-title":"Pattern Recogn"},{"key":"1796_CR40","unstructured":"Guo M-H, Lu C-Z, Hou Q, Liu Z, Cheng M-M, Hu S-M (2022) Segnext: rethinking convolutional attention design for semantic segmentation. arXiv preprint arXiv:2209.08575"},{"key":"1796_CR41","doi-asserted-by":"crossref","unstructured":"Liu Z, Mao H, Wu C-Y, Feichtenhofer C, Darrell T, Xie S (2022) A convnet for the 2020s. In: Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, 2022, pp. 11976\u201311986","DOI":"10.1109\/CVPR52688.2022.01167"},{"key":"1796_CR42","doi-asserted-by":"crossref","unstructured":"He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on computer vision and pattern recognition, 2016, pp. 770\u2013778","DOI":"10.1109\/CVPR.2016.90"},{"key":"1796_CR43","doi-asserted-by":"crossref","unstructured":"Qin Z, Zhang P, Wu F, Li X (2021) Fcanet: frequency channel attention networks. In: Proceedings of the IEEE\/CVF International Conference on computer vision, 2021, pp. 783\u2013792","DOI":"10.1109\/ICCV48922.2021.00082"},{"key":"1796_CR44","doi-asserted-by":"crossref","unstructured":"Wang Q, Wu B, Zhu P, Li P, Zuo W, Hu Q (2020) ECA-Net: efficient channel attention for deep convolutional neural networks. In: Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, 2020, pp. 
11534\u201311542","DOI":"10.1109\/CVPR42600.2020.01155"},{"key":"1796_CR45","doi-asserted-by":"crossref","unstructured":"Fu J et al (2019) Dual attention network for scene segmentation. In: Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, 2019, pp. 3146\u20133154","DOI":"10.1109\/CVPR.2019.00326"},{"key":"1796_CR46","doi-asserted-by":"crossref","unstructured":"Zhao H, Jia J, Koltun V (2020) Exploring self-attention for image recognition. In: Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, 2020, pp. 10076\u201310085","DOI":"10.1109\/CVPR42600.2020.01009"},{"key":"1796_CR47","doi-asserted-by":"crossref","unstructured":"Huang Z, Wang X, Huang L, Huang C, Wei Y, Liu W (2019) Ccnet: Criss-cross attention for semantic segmentation. In: Proceedings of the IEEE\/CVF International Conference on computer vision, 2019, pp 603\u2013612.","DOI":"10.1109\/ICCV.2019.00069"},{"key":"1796_CR48","doi-asserted-by":"crossref","unstructured":"Wang H, Zhu Y, Green B, Adam H, Yuille A, Chen L-C (2020) Axial-deeplab: stand-alone axial-attention for panoptic segmentation. In: Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part IV, Springer, 2020, pp. 108\u2013126","DOI":"10.1007\/978-3-030-58548-8_7"},{"key":"1796_CR49","doi-asserted-by":"crossref","first-page":"1110","DOI":"10.3389\/fgene.2019.01110","volume":"10","author":"Y Chen","year":"2019","unstructured":"Chen Y et al (2019) Channel-Unet: a spatial channel-wise convolutional neural network for liver and tumors segmentation. Front Genet 10:1110","journal-title":"Front Genet"},{"key":"1796_CR50","doi-asserted-by":"crossref","unstructured":"Long J, Shelhamer E, Darrell T (2015) Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE Conference on computer vision and pattern recognition, 2015, pp. 
3431\u20133440.","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"1796_CR51","doi-asserted-by":"crossref","unstructured":"Ronneberger O, Fischer P, Brox T (2015) U-net: convolutional networks for biomedical image segmentation. In: Medical Image Computing and Computer-Assisted Intervention\u2013MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18, Springer, 2015, pp. 234\u2013241","DOI":"10.1007\/978-3-319-24574-4_28"},{"issue":"11","key":"1796_CR52","doi-asserted-by":"crossref","first-page":"3229","DOI":"10.1109\/TMI.2023.3278461","volume":"42","author":"J Yang","year":"2023","unstructured":"Yang J, Jiao L, Shang R, Liu X, Li R, Xu L (2023) Ept-net: Edge perception transformer for 3d medical image segmentation. IEEE Trans Med Imaging 42(11):3229\u20133243","journal-title":"IEEE Trans Med Imaging"},{"key":"1796_CR53","unstructured":"Lee HH, Bao S, Huo Y, Landman BA (2022) 3d ux-net: A large kernel volumetric convnet modernizing hierarchical transformer for medical image segmentation. arXiv preprint arXiv:2209.15076"},{"key":"1796_CR54","doi-asserted-by":"crossref","unstructured":"Li X, Wang W, Hu X, Yang J (2019) Selective kernel networks. In: Proceedings of the IEEE\/CVF Conference on computer vision and pattern recognition, 2019, pp 510\u2013519.","DOI":"10.1109\/CVPR.2019.00060"},{"key":"1796_CR55","doi-asserted-by":"crossref","unstructured":"Cao H, Wang Y, Chen J, Jiang D, Zhang X, Tian Q, Wang M (2022) Swin-unet: Unet-like pure transformer for medical image segmentation. In: European conference on computer vision. Springer Nature Switzerland, Cham","DOI":"10.1007\/978-3-031-25066-8_9"},{"key":"1796_CR56","doi-asserted-by":"crossref","unstructured":"Ma J, Zhang Y, Gu S, Ge C, Ma S, Young A, Wang B (2023) Unleashing the strengths of unlabeled data in pan-cancer abdominal organ quantification: the flare22 challenge. 
arXiv preprint arXiv:2308.05862","DOI":"10.1016\/S2589-7500(24)00154-7"},{"key":"1796_CR57","unstructured":"Contributors (2020) MMSegmentation. \"OpenMMLab semantic segmentation toolbox and benchmark.\" https:\/\/github.com\/open-mmlab\/mmsegmentation"},{"key":"1796_CR58","doi-asserted-by":"crossref","unstructured":"Han D et al (2025) Agent attention: on the integration of softmax and linear attention. In: European Conference on computer vision, Springer, 2025, pp. 124\u2013140","DOI":"10.1007\/978-3-031-72973-7_8"},{"issue":"4","key":"1796_CR59","doi-asserted-by":"crossref","first-page":"733","DOI":"10.1007\/s41095-023-0364-2","volume":"9","author":"M-H Guo","year":"2023","unstructured":"Guo M-H, Lu C-Z, Liu Z-N, Cheng M-M, Hu S-M (2023) Visual attention network. Computational Visual Media 9(4):733\u2013752","journal-title":"Computational Visual Media"},{"key":"1796_CR60","doi-asserted-by":"crossref","unstructured":"Azad R et al (2024) Beyond self-attention: deformable large kernel attention for medical image segmentation. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, 2024, pp. 1287\u20131297","DOI":"10.1109\/WACV57701.2024.00132"},{"key":"1796_CR61","doi-asserted-by":"crossref","unstructured":"Mansourian AM, Ahmadi R, Kasaei S (2023) Aicsd: adaptive inter-class similarity distillation for semantic segmentation. 
arXiv preprint arXiv:2308.04243","DOI":"10.2139\/ssrn.4992647"}],"container-title":["Complex &amp; Intelligent Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-025-01796-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s40747-025-01796-x\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s40747-025-01796-x.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,30]],"date-time":"2025-03-30T21:17:57Z","timestamp":1743369477000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s40747-025-01796-x"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3,8]]},"references-count":62,"journal-issue":{"issue":"4","published-print":{"date-parts":[[2025,4]]}},"alternative-id":["1796"],"URL":"https:\/\/doi.org\/10.1007\/s40747-025-01796-x","relation":{},"ISSN":["2199-4536","2198-6053"],"issn-type":[{"value":"2199-4536","type":"print"},{"value":"2198-6053","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3,8]]},"assertion":[{"value":"17 September 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 January 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"8 March 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"On behalf of all authors, the corresponding authors state that there is no conflict of 
interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"203"}}