{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T11:04:43Z","timestamp":1773486283460,"version":"3.50.1"},"reference-count":47,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2026,1,5]],"date-time":"2026-01-05T00:00:00Z","timestamp":1767571200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2026,1,5]],"date-time":"2026-01-05T00:00:00Z","timestamp":1767571200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62006150"],"award-info":[{"award-number":["62006150"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100003399","name":"Science and Technology Commission of Shanghai Municipality","doi-asserted-by":"publisher","award":["21DZ2203100"],"award-info":[{"award-number":["21DZ2203100"]}],"id":[{"id":"10.13039\/501100003399","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Pattern Anal Applic"],"published-print":{"date-parts":[[2026,3]]},"DOI":"10.1007\/s10044-025-01603-w","type":"journal-article","created":{"date-parts":[[2026,1,5]],"date-time":"2026-01-05T07:06:35Z","timestamp":1767596795000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["CRA-U: lightweight U-Net with component ranking attention for skin lesion segmentation"],"prefix":"10.1007","volume":"29","author":[{"given":"Zhan-Peng","family":"Ji","sequence":"first","affiliation":[]},{"given":"Yan-Xu","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Yu-Jie","family":"Xiong","sequence":"additional","affiliation":[]},{"given":"Xi-Jiong","family":"Xie","sequence":"additional","affiliation":[]},{"given":"Chun-Ming","family":"Xia","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2026,1,5]]},"reference":[{"key":"1603_CR1","doi-asserted-by":"publisher","unstructured":"Al-Dhabyani W, Gomaa M, Khaled H et al (2020) Dataset of breast ultrasound images. In: Data in brief, p 104863. https:\/\/doi.org\/10.1016\/j.dib.2019.104863","DOI":"10.1016\/j.dib.2019.104863"},{"key":"1603_CR2","doi-asserted-by":"crossref","unstructured":"Azad R, Arimond R, Aghdam EK et al (2023) DAE-Former: dual attention-guided efficient transformer for medical image segmentation, pp 83\u201395","DOI":"10.1007\/978-3-031-46005-0_8"},{"issue":"12","key":"1603_CR3","doi-asserted-by":"publisher","first-page":"1247","DOI":"10.1038\/s41592-019-0612-7","volume":"16","author":"J Caicedo","year":"2019","unstructured":"Caicedo J, Goodman A, Karhohs K et al (2019) Nucleus segmentation across imaging experiments: the 2018 data science bowl. Nat Methods 16(12):1247\u20131253. 
https:\/\/doi.org\/10.1038\/s41592-019-0612-7","journal-title":"Nat Methods"},{"key":"1603_CR4","doi-asserted-by":"crossref","unstructured":"Cao H, Wang Y, Chen J et al (2022) SWIN-UNet: UNet-like pure transformer for medical image segmentation, pp 205\u2013218","DOI":"10.1007\/978-3-031-25066-8_9"},{"key":"1603_CR5","doi-asserted-by":"crossref","unstructured":"Chang S, Wang P, Lin M et al (2023) Making vision transformers efficient from a token sparsification view, pp 6195\u20136205","DOI":"10.1109\/CVPR52729.2023.00600"},{"issue":"5","key":"1603_CR6","doi-asserted-by":"publisher","first-page":"1289","DOI":"10.1109\/TMI.2022.3226268","volume":"42","author":"G Chen","year":"2023","unstructured":"Chen G, Li L, Dai Y et al (2023) AAU-Net: an adaptive attention U-Net for breast lesions segmentation in ultrasound images. IEEE Trans Med Imaging 42(5):1289\u20131300. https:\/\/doi.org\/10.1109\/TMI.2022.3226268","journal-title":"IEEE Trans Med Imaging"},{"key":"1603_CR7","unstructured":"Chen J, Lu Y, Yu Q et al (2021) TransUNet: transformers make strong encoders for medical image segmentation. arXiv preprint arXiv:2102.04306"},{"key":"1603_CR8","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2024.106726","volume":"98","author":"YX Chen","year":"2024","unstructured":"Chen YX, Xiong YJ, Qiu XH et al (2024) Harmonious parameters and performance: lightweight convolutional stage and local feature weighted fusion MLP for medical image segmentation. Biomed Signal Process Control 98:106726. https:\/\/doi.org\/10.1016\/j.bspc.2024.106726","journal-title":"Biomed Signal Process Control"},{"key":"1603_CR9","unstructured":"Choromanski K, Likhosherstov V, Dohan D et al (2020) Rethinking attention with performers"},{"key":"1603_CR10","unstructured":"Codella N, Rotemberg V, Tschandl P et al (2019) Skin lesion analysis toward melanoma detection 2018: a challenge hosted by the international skin imaging collaboration (ISIC). arXiv preprint arXiv:1902.03368"},{"key":"1603_CR11","doi-asserted-by":"crossref","unstructured":"Codella NC, Gutman D, Celebi ME et al (2018) Skin lesion analysis toward melanoma detection: a challenge at the 2017 international symposium on biomedical imaging (ISBI), hosted by the international skin imaging collaboration (ISIC). In: 2018 IEEE 15th international symposium on biomedical imaging (ISBI 2018), IEEE, pp 168\u2013172","DOI":"10.1109\/ISBI.2018.8363547"},{"key":"1603_CR12","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2024.103241","volume":"97","author":"D Dai","year":"2024","unstructured":"Dai D, Dong C, Yan Q et al (2024) I2U-Net: a dual-path u-net with rich information interaction for medical image segmentation. Med Image Anal 97:103241. https:\/\/doi.org\/10.1016\/j.media.2024.103241","journal-title":"Med Image Anal"},{"key":"1603_CR13","unstructured":"Dosovitskiy A, Beyer L, Kolesnikov A et al (2020) An image is worth $$16\\times 16$$ words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929"},{"issue":"1","key":"1603_CR14","doi-asserted-by":"publisher","first-page":"17","DOI":"10.1007\/s10044-024-01384-8","volume":"28","author":"Q Gao","year":"2025","unstructured":"Gao Q, Wang Y, Zhou F et al (2025) MSFM-UNET: enhancing medical image segmentation with multi-scale and multi-view frequency fusion. Pattern Anal Appl 28(1):17. 
https:\/\/doi.org\/10.1007\/s10044-024-01384-8","journal-title":"Pattern Anal Appl"},{"key":"1603_CR15","unstructured":"Gutman D, Codella NCF, Celebi E et al (2016) Skin lesion analysis toward melanoma detection: a challenge at the international symposium on biomedical imaging (ISBI) arXiv preprint arXiv:1605.01397"},{"key":"1603_CR16","doi-asserted-by":"crossref","unstructured":"Han D, Pan X, Han Y et al (2023) Flatten transformer: vision transformer using focused linear attention. In: Proceedings of the IEEE\/CVF international conference on computer vision, pp 5961\u20135971","DOI":"10.1109\/ICCV51070.2023.00548"},{"key":"1603_CR17","doi-asserted-by":"publisher","unstructured":"Han D, Ye T, Han Y et\u00a0al (2023) Agent attention: on the integration of Softmax and linear attention. arXiv preprint arXiv:2312.08874. https:\/\/doi.org\/10.48550\/arXiv.2312.08874","DOI":"10.48550\/arXiv.2312.08874"},{"key":"1603_CR18","doi-asserted-by":"crossref","unstructured":"Hassani A, Walton S, Li J et al (2023) Neighborhood attention transformer, pp 6185\u20136194","DOI":"10.1109\/CVPR52729.2023.00599"},{"issue":"3","key":"1603_CR19","doi-asserted-by":"publisher","first-page":"128","DOI":"10.1007\/s10044-025-01506-w","volume":"28","author":"NK Hoang","year":"2025","unstructured":"Hoang NK, Nguyen DH, Tran TT et al (2025) DermoMamba: a cross-scale mamba-based model with guide fusion loss for skin lesion segmentation in dermoscopy images. Pattern Anal Appl 28(3):128. https:\/\/doi.org\/10.1007\/s10044-025-01506-w","journal-title":"Pattern Anal Appl"},{"key":"1603_CR20","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2023.105417","volume":"87","author":"L Huang","year":"2024","unstructured":"Huang L, Zhao YG, Yang TJ (2024) Skin lesion image segmentation by using backchannel filling CNN and level sets. Biomed Signal Process Control 87:105417. https:\/\/doi.org\/10.1016\/j.bspc.2023.105417","journal-title":"Biomed Signal Process Control"},{"issue":"5","key":"1603_CR21","doi-asserted-by":"publisher","first-page":"1484","DOI":"10.1109\/TMI.2022.3230943","volume":"42","author":"X Huang","year":"2023","unstructured":"Huang X, Deng Z, Li D et al (2023) MISSFormer: an effective transformer for 2d medical image segmentation. IEEE Trans Med Imaging 42(5):1484\u20131494. https:\/\/doi.org\/10.1109\/TMI.2022.3230943","journal-title":"IEEE Trans Med Imaging"},{"key":"1603_CR22","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2023.105895","volume":"91","author":"Z Huang","year":"2024","unstructured":"Huang Z, Deng H, Yin S et al (2024) ADF-Net: a novel adaptive dual-stream encoding and focal attention decoding network for skin lesion segmentation. Biomed Signal Process Control 91:105895. https:\/\/doi.org\/10.1016\/j.bspc.2023.105895","journal-title":"Biomed Signal Process Control"},{"key":"1603_CR23","unstructured":"Katharopoulos A, Vyas A, Pappas N et al (2020) Transformers are RNNS: fast autoregressive transformers with linear attention, pp 5156\u20135165"},{"issue":"7","key":"1603_CR24","doi-asserted-by":"publisher","first-page":"2325","DOI":"10.1109\/TMI.2023.3247814","volume":"42","author":"X Lin","year":"2023","unstructured":"Lin X, Yu L, Cheng KT et al (2023) The lighter the better: rethinking transformers in medical image segmentation through adaptive pruning. IEEE Trans Med Imaging 42(7):2325\u20132337. 
https:\/\/doi.org\/10.1109\/TMI.2023.3247814","journal-title":"IEEE Trans Med Imaging"},{"issue":"3","key":"1603_CR25","doi-asserted-by":"publisher","first-page":"93","DOI":"10.1007\/s10044-024-01307-7","volume":"27","author":"S Liu","year":"2024","unstructured":"Liu S, Wang P, Lin Y et al (2024) SMRU-Net: skin disease image segmentation using channel-space separate attention with depthwise separable convolutions. Pattern Anal Appl 27(3):93. https:\/\/doi.org\/10.1007\/s10044-024-01307-7","journal-title":"Pattern Anal Appl"},{"key":"1603_CR26","doi-asserted-by":"crossref","unstructured":"Liu Z, Lin Y, Cao Y et al (2021) Swin transformer: hierarchical vision transformer using shifted windows, pp 10012\u201310022","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"1603_CR27","doi-asserted-by":"crossref","unstructured":"Long J, Shelhamer E, Darrell T (2015) Fully convolutional networks for semantic segmentation, pp 3431\u20133440","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"1603_CR28","doi-asserted-by":"publisher","first-page":"1289","DOI":"10.1016\/j.compbiomed.2022.106533","volume":"153","author":"Z Ma","year":"2023","unstructured":"Ma Z, Qi Y, Xu C et al (2023) ATFE-Net: axial transformer and feature enhancement-based CNN for ultrasound breast mass segmentation. Comput Biol Med 153:1289\u20131300. https:\/\/doi.org\/10.1016\/j.compbiomed.2022.106533","journal-title":"Comput Biol Med"},{"key":"1603_CR29","doi-asserted-by":"crossref","unstructured":"Mendon\u00e7a T, Ferreira PM, Marques JS et al (2013) Ph 2-a dermoscopic image database for research and benchmarking, pp 5437\u20135440","DOI":"10.1109\/EMBC.2013.6610779"},{"key":"1603_CR30","unstructured":"Oktay O, Schlemper J, Folgoc LL et al (2018) Attention U-Net: learning where to look for the pancreas. arXiv preprint arXiv:1804.03999"},{"key":"1603_CR31","doi-asserted-by":"crossref","unstructured":"Ronneberger O, Fischer P, Brox T (2015) U-Net: convolutional networks for biomedical image segmentation. In: Medical image computing and computer-assisted intervention\u2014MICCAI 2015. Springer, Cham, pp 234\u2013241","DOI":"10.1007\/978-3-319-24574-4_28"},{"issue":"5","key":"1603_CR32","doi-asserted-by":"publisher","first-page":"2252","DOI":"10.1109\/JBHI.2021.3138024","volume":"26","author":"A Srivastava","year":"2021","unstructured":"Srivastava A, Jha D, Chanda S et al (2021) MSRF-Net: a multi-scale residual fusion network for biomedical image segmentation. IEEE J Biomed Health Inform 26(5):2252\u20132263. https:\/\/doi.org\/10.1109\/JBHI.2021.3138024","journal-title":"IEEE J Biomed Health Inform"},{"key":"1603_CR33","doi-asserted-by":"crossref","unstructured":"Tomar NK, Jha D, Ali S et al (2021) DDANet: dual decoder attention network for automatic polyp segmentation, pp 307\u2013314","DOI":"10.1007\/978-3-030-68793-9_23"},{"key":"1603_CR34","doi-asserted-by":"crossref","unstructured":"Valanarasu JMJ, Patel VM (2022) UNEXT: MLP-based rapid medical image segmentation network. In: Medical image computing and computer assisted intervention\u2014MICCAI 2022. 
Springer, Cham, pp 23\u201333","DOI":"10.1007\/978-3-031-16443-9_3"},{"key":"1603_CR35","doi-asserted-by":"crossref","unstructured":"Vaswani A, Ramachandran P, Srinivas A et al (2021) Scaling local self-attention for parameter efficient visual backbones, pp 12894\u201312904","DOI":"10.1109\/CVPR46437.2021.01270"},{"key":"1603_CR36","doi-asserted-by":"crossref","unstructured":"Wang H, Cao P, Wang J et al (2022) UCTransNet: rethinking the skip connections in U-Net from a channel-wise perspective with transformer, pp 2441\u20132449","DOI":"10.1609\/aaai.v36i3.20144"},{"key":"1603_CR37","doi-asserted-by":"publisher","DOI":"10.1016\/j.patter.2025.101298","volume":"6","author":"R Wu","year":"2024","unstructured":"Wu R, Liu Y, Ning G et al (2024) Ultralight VM-UNET: parallel vision mamba significantly reduces parameters for skin lesion segmentation. Patterns 6:101298","journal-title":"Patterns"},{"key":"1603_CR38","doi-asserted-by":"publisher","DOI":"10.1016\/j.neucom.2025.129447","volume":"624","author":"R Wu","year":"2025","unstructured":"Wu R, Liu Y, Liang P et al (2025) H-VMUNET: high-order vision mamba UNet for medical image segmentation. Neurocomputing 624:129447","journal-title":"Neurocomputing"},{"key":"1603_CR39","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2025.107646","volume":"105","author":"R Wu","year":"2025","unstructured":"Wu R, Pan L, Liang P et al (2025) SK-VM++: mamba assists skip-connections for medical image segmentation. Biomed Signal Process Control 105:107646","journal-title":"Biomed Signal Process Control"},{"key":"1603_CR40","doi-asserted-by":"crossref","unstructured":"Xiong Y, Zeng Z, Chakraborty R et al (2021) Nystr\u00f6mformer: a nystr\u00f6m-based algorithm for approximating self-attention, pp 14138\u201314148","DOI":"10.1609\/aaai.v35i16.17664"},{"key":"1603_CR41","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2023.106626","volume":"154","author":"Q Xu","year":"2023","unstructured":"Xu Q, Ma Z, He N et al (2023) DCSAU-Net: a deeper and more compact split-attention U-Net for medical image segmentation. Comput Biol Med 154:106626. https:\/\/doi.org\/10.1016\/j.compbiomed.2023.106626","journal-title":"Comput Biol Med"},{"key":"1603_CR42","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2024.107242","volume":"101","author":"B Yang","year":"2024","unstructured":"Yang B, Zhang R, Peng H et al (2024) SLP-Net: an efficient lightweight network for segmentation of skin lesions. Biomed Signal Process Control 101:107242. https:\/\/doi.org\/10.1016\/j.bspc.2024.107242","journal-title":"Biomed Signal Process Control"},{"key":"1603_CR43","doi-asserted-by":"crossref","unstructured":"You H, Xiong Y, Dai X et al (2023) Castling-ViT: compressing self-attention via switching towards linear-angular attention at vision transformer inference, pp 14431\u201314442","DOI":"10.1109\/CVPR52729.2023.01387"},{"key":"1603_CR44","doi-asserted-by":"publisher","unstructured":"Yu Z, Yu L, Zheng W et al (2023) EIU-Net: enhanced feature extraction and improved skip connections in U-Net for skin lesion segmentation. https:\/\/doi.org\/10.1016\/j.compbiomed.2023.107081","DOI":"10.1016\/j.compbiomed.2023.107081"},{"issue":"7","key":"1603_CR45","doi-asserted-by":"publisher","first-page":"4251","DOI":"10.1109\/LRA.2023.3280818","volume":"8","author":"Y Zhang","year":"2023","unstructured":"Zhang Y, Zhang J (2023) GFANet: group fusion aggregation network for real time stereo matching. IEEE Robot Autom Lett 8(7):4251\u20134258. 
https:\/\/doi.org\/10.1109\/LRA.2023.3280818","journal-title":"IEEE Robot Autom Lett"},{"key":"1603_CR46","doi-asserted-by":"crossref","unstructured":"Zhou Z, Rahman Siddiquee MM, Tajbakhsh N et al (2018) UNet++: a nested U-Net architecture for medical image segmentation, pp 3\u201311","DOI":"10.1007\/978-3-030-00889-5_1"},{"key":"1603_CR47","unstructured":"Zhu M, Tang Y, Han K (2021) Vision transformer pruning. arXiv preprint arXiv:2104.08500"}],"container-title":["Pattern Analysis and Applications"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10044-025-01603-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s10044-025-01603-w","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s10044-025-01603-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,3,14]],"date-time":"2026-03-14T10:38:31Z","timestamp":1773484711000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s10044-025-01603-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2026,1,5]]},"references-count":47,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2026,3]]}},"alternative-id":["1603"],"URL":"https:\/\/doi.org\/10.1007\/s10044-025-01603-w","relation":{},"ISSN":["1433-7541","1433-755X"],"issn-type":[{"value":"1433-7541","type":"print"},{"value":"1433-755X","type":"electronic"}],"subject":[],"published":{"date-parts":[[2026,1,5]]},"assertion":[{"value":"8 August 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 December 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 January 2026","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"22"}}