{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:11:52Z","timestamp":1775578312044,"version":"3.50.1"},"reference-count":54,"publisher":"Springer Science and Business Media LLC","issue":"10","license":[{"start":{"date-parts":[[2025,2,14]],"date-time":"2025-02-14T00:00:00Z","timestamp":1739491200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,2,14]],"date-time":"2025-02-14T00:00:00Z","timestamp":1739491200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"DOI":"10.13039\/501100018542","name":"Natural Science Foundation of Sichuan Province","doi-asserted-by":"publisher","award":["2024NSFSC0664"],"award-info":[{"award-number":["2024NSFSC0664"]}],"id":[{"id":"10.13039\/501100018542","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Vis Comput"],"published-print":{"date-parts":[[2025,8]]},"DOI":"10.1007\/s00371-025-03832-w","type":"journal-article","created":{"date-parts":[[2025,2,13]],"date-time":"2025-02-13T23:33:47Z","timestamp":1739489627000},"page":"7703-7721","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":8,"title":["Dynamic neighbourhood-enhanced UNet with interwoven fusion for medical image segmentation"],"prefix":"10.1007","volume":"41","author":[{"given":"Liming","family":"Wan","sequence":"first","affiliation":[]},{"given":"Lin","family":"Song","sequence":"additional","affiliation":[]},{"given":"Ying","family":"Zhou","sequence":"additional","affiliation":[]},{"given":"Chenrui","family":"Kang","sequence":"additional","affiliation":[]},{"given":"Shijian","family":"Zheng","sequence":"additional","affiliation":[]},{"given":"Guo","family":"Chen","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,14]]},"reference":[{"key":"3832_CR1","doi-asserted-by":"crossref","unstructured":"You, C., Dai, W., Liu, F., Min, Y., Dvornek, N.C., Li, X., Clifton, D.A., Lawrence Staib, and James\u00a0S Duncan. Mine your own anatomy: revisiting medical image segmentation with extremely limited labels. IEEE Trans. Pattern Anal. Mach. Intell., (2024)","DOI":"10.1109\/TPAMI.2024.3461321"},{"key":"3832_CR2","first-page":"29582","volume":"35","author":"C You","year":"2022","unstructured":"You, C., Zhao, R., Liu, F., Dong, S., Chinchali, S., Topcu, U., Staib, L., Duncan, J.: Class-aware adversarial transformers for medical image segmentation. Adv. Neural Inf. Process. Syst. 35, 29582\u201329596 (2022)","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"3832_CR3","doi-asserted-by":"crossref","unstructured":"Long, J., Evan, S., Trevor, D.,: Fully convolutional networks for semantic segmentation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3431\u20133440, (2015)","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"3832_CR4","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., Brox, T.,: U-net: Convolutional networks for biomedical image segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 234\u2013241. Springer, (2015)","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"3832_CR5","doi-asserted-by":"crossref","unstructured":"Xiao, X., Lian, S., Luo, Z., Li, S.,: Weighted res-unet for high-quality retina vessel segmentation. In: 2018 9th International Conference on Information Technology in Medicine and Education (ITME), pp. 327\u2013331. IEEE, (2018)","DOI":"10.1109\/ITME.2018.00080"},{"key":"3832_CR6","doi-asserted-by":"crossref","unstructured":"Jha, D., Smedsrud, P.H., Johansen, D., de Lange, T., Johansen, H\u00e5vard. D., Halvorsen, P., Riegler, M.A.: A comprehensive study on colorectal polyp segmentation with resunet++, conditional random field and test-time augmentation. IEEE J. Biomed. Health Inf. 25(6), 2029\u20132040 (2021)","DOI":"10.1109\/JBHI.2021.3049304"},{"key":"3832_CR7","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2023.106626","volume":"154","author":"Q Xu","year":"2023","unstructured":"Xu, Q., Ma, Z., Na, H.E., Duan, W.: A deeper and more compact split-attention u-net for medical image segmentation. Comput. Biol. Med. 154, 106626 (2023)","journal-title":"Comput. Biol. Med."},{"key":"3832_CR8","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et\u00a0al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint[SPACE]arXiv:2010.11929, (2020)"},{"key":"3832_CR9","unstructured":"Chen, J., Lu, Y., Yu, Q., Luo, X., Adeli, E., Wang, Y., Lu, L., Yuille, A.L., Zhou, Y.,: Transunet: transformers make strong encoders for medical image segmentation. arXiv preprint[SPACE]arXiv:2102.04306, (2021)"},{"key":"3832_CR10","doi-asserted-by":"crossref","unstructured":"Gao, Y., Zhou, M., Metaxas, D.N.,: Utnet: a hybrid transformer architecture for medical image segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 61\u201371. Springer, (2021)","DOI":"10.1007\/978-3-030-87199-4_6"},{"key":"3832_CR11","doi-asserted-by":"crossref","unstructured":"Valanarasu, J.M.J., Oza, P., Hacihaliloglu, I., Patel, V.M.,: Medical transformer: gated axial-attention for medical image segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 36\u201346. Springer, (2021)","DOI":"10.1007\/978-3-030-87193-2_4"},{"key":"3832_CR12","unstructured":"Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q., Wang, M.,: Swin-unet: Unet-like pure transformer for medical image segmentation. arXiv preprint [SPACE]arXiv:2105.05537, (2021)"},{"issue":"9","key":"3832_CR13","doi-asserted-by":"publisher","first-page":"2228","DOI":"10.1109\/TMI.2022.3161829","volume":"41","author":"C You","year":"2022","unstructured":"You, C., Zhou, Y., Zhao, R., Staib, L., Duncan, J.S.: Simcvd: simple contrastive voxel-wise representation distillation for semi-supervised medical image segmentation. IEEE Trans. Med. Imag. 41(9), 2228\u20132237 (2022)","journal-title":"IEEE Trans. Med. Imag."},{"key":"3832_CR14","doi-asserted-by":"publisher","unstructured":"You, C., Dai, W., Min, Y., Liu, F., Clifton, D., Zhou, S.K., Staib, L., Duncan, J.: Rethinking semi-supervised medical image segmentation: a variance-reduction perspective. Adv. Neural Inf. Process. Syst. (2024). https:\/\/doi.org\/10.48550\/arXiv.2302.01735","DOI":"10.48550\/arXiv.2302.01735"},{"key":"3832_CR15","doi-asserted-by":"crossref","unstructured":"You, C., Dai, W., Min, Y., Staib, L., Sekhon, J., Duncan, J.S.,: Action++: improving semi-supervised medical image segmentation with adaptive anatomical contrast. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 194\u2013205. Springer, (2023)","DOI":"10.1007\/978-3-031-43901-8_19"},{"key":"3832_CR16","doi-asserted-by":"crossref","unstructured":"Dai, J., Qi, H., Xiong, Y., Li, Y., Zhang, G., Hu, H., Wei, Y.,: Deformable convolutional networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 764\u2013773, (2017)","DOI":"10.1109\/ICCV.2017.89"},{"key":"3832_CR17","doi-asserted-by":"crossref","unstructured":"Zhu, X., Hu, H., Lin, S., Dai, J.: Deformable convnets v2: More deformable, better results. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 9308\u20139316, (2019)","DOI":"10.1109\/CVPR.2019.00953"},{"key":"3832_CR18","doi-asserted-by":"crossref","unstructured":"Azad, R., Niggemeier, L., H\u00fcttemann, M., Kazerouni, A., Aghdam, E.K., Velichko, Y., Bagci, U., Merhof, D.,: Beyond self-attention: Deformable large kernel attention for medical image segmentation. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 1287\u20131297, (2024)","DOI":"10.1109\/WACV57701.2024.00132"},{"key":"3832_CR19","doi-asserted-by":"crossref","unstructured":"Qi, Y., He, Y., Qi, X., Zhang, Y., Yang, G.,: Dynamic snake convolution based on topological geometric constraints for tubular structure segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6070\u20136079, (2023)","DOI":"10.1109\/ICCV51070.2023.00558"},{"key":"3832_CR20","doi-asserted-by":"crossref","unstructured":"Zhou, Z, Siddiquee, M.M.R., Tajbakhsh, N., Liang, J.,: Unet++: A nested u-net architecture for medical image segmentation. In: Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support, pp. 3\u201311. Springer, (2018)","DOI":"10.1007\/978-3-030-00889-5_1"},{"key":"3832_CR21","doi-asserted-by":"crossref","unstructured":"Huang, H., Lin, L., Tong, R., Hu, H., Zhang, Q., Iwamoto, Y., Han, X., Chen, Y-W., Wu, J.,: Unet 3+: A full-scale connected unet for medical image segmentation. In: ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1055\u20131059. IEEE, (2020)","DOI":"10.1109\/ICASSP40776.2020.9053405"},{"key":"3832_CR22","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2023.109728","volume":"142","author":"G Chen","year":"2023","unstructured":"Chen, G., Li, L., Zhang, J., Dai, Yu.: Rethinking the unpretentious u-net for medical ultrasound image segmentation. Pattern Recognit. 142, 109728 (2023)","journal-title":"Pattern Recognit."},{"key":"3832_CR23","doi-asserted-by":"crossref","unstructured":"Ruan, J., Xie, M., Gao, J., Liu, T., Fu, Y.,: Ege-unet: an efficient group enhanced unet for skin lesion segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 481\u2013490. Springer, (2023)","DOI":"10.1007\/978-3-031-43901-8_46"},{"key":"3832_CR24","unstructured":"Oktay, O., Schlemper, J., Folgoc, L.L., Lee, M., Heinrich, M., Misawa, Kazunari, Mori, Kensaku, McDonagh, Steven, Hammerla, Nils\u00a0Y, Kainz, Bernhard, et\u00a0al.: Attention u-net: Learning where to look for the pancreas. arXiv preprint [SPACE]arXiv:1804.03999, (2018)"},{"key":"3832_CR25","doi-asserted-by":"crossref","unstructured":"Ruan, J., Xiang, S., Xie, M., Liu, T., Fu, Y.,: Malunet: a multi-attention and light-weight unet for skin lesion segmentation. In: 2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM), pp. 1150\u20131156. IEEE, (2022)","DOI":"10.1109\/BIBM55620.2022.9995040"},{"issue":"5","key":"3832_CR26","doi-asserted-by":"publisher","first-page":"1484","DOI":"10.1109\/TMI.2022.3230943","volume":"42","author":"X Huang","year":"2022","unstructured":"Huang, X., Deng, Z., Li, D., Yuan, X., Ying, F.: Missformer: an effective transformer for 2d medical image segmentation. IEEE Trans. Med. Imag. 42(5), 1484\u20131494 (2022)","journal-title":"IEEE Trans. Med. Imag."},{"key":"3832_CR27","doi-asserted-by":"crossref","unstructured":"Huang, H., Xie, S., Lin, L., Iwamoto, Y., Han, X., Chen, Y-W., Tong, R.,: Scaleformer: revisiting the transformer-based backbones from a scale-wise perspective for medical image segmentation. arXiv preprint [SPACE]arXiv:2207.14552, (2022)","DOI":"10.24963\/ijcai.2022\/135"},{"key":"3832_CR28","first-page":"2441","volume":"36","author":"H Wang","year":"2022","unstructured":"Wang, H., Cao, P., Wang, J., Zaiane, O.R.: Uctransnet: rethinking the skip connections in u-net from a channel-wise perspective with transformer. Proce. AAAI Confer. Artif. Intell. 36, 2441\u20132449 (2022)","journal-title":"Proce. AAAI Confer. Artif. Intell."},{"issue":"12","key":"3832_CR29","doi-asserted-by":"publisher","first-page":"3446","DOI":"10.1109\/TMI.2021.3087857","volume":"40","author":"R Liu","year":"2021","unstructured":"Liu, R., Liu, M., Sheng, B., Li, H., Li, P., Song, H., Zhang, P., Jiang, L., Shen, D.: Nhbs-net: a feature fusion attention network for ultrasound neonatal hip bone segmentation. IEEE Trans. Med. Imag. 40(12), 3446\u20133458 (2021)","journal-title":"IEEE Trans. Med. Imag."},{"key":"3832_CR30","doi-asserted-by":"publisher","DOI":"10.1007\/s00371-024-03612-y","author":"Y Fan","year":"2024","unstructured":"Fan, Y., Song, J., Yuan, L., Jia, Y.: Hct-unet: multi-target medical image segmentation via a hybrid cnn-transformer unet incorporating multi-axis gated multi-layer perceptron. Visual Comput. (2024). https:\/\/doi.org\/10.1007\/s00371-024-03612-y","journal-title":"Visual Comput."},{"key":"3832_CR31","doi-asserted-by":"crossref","unstructured":"You, C., Zhao, R., Staib, L.H., Duncan, J.S.,: Momentum contrastive voxel-wise representation learning for semi-supervised volumetric medical image segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 639\u2013652. Springer, (2022)","DOI":"10.1007\/978-3-031-16440-8_61"},{"key":"3832_CR32","doi-asserted-by":"crossref","unstructured":"He, Kaiming, Zhang, Xiangyu, Ren, Shaoqing, Sun, Jian: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770\u2013778, (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"3832_CR33","unstructured":"Chen, L-C., Papandreou, G., Schroff, F., Adam, H.,: Rethinking atrous convolution for semantic image segmentation. arXiv preprint [SPACE]arXiv:1706.05587, (2017)"},{"key":"3832_CR34","doi-asserted-by":"crossref","unstructured":"Sandler, Mark, Howard, Andrew, Zhu, Menglong, Zhmoginov, Andrey, Chen, Liang-Chieh: Mobilenetv2: Inverted residuals and linear bottlenecks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4510\u20134520, (2018)","DOI":"10.1109\/CVPR.2018.00474"},{"key":"3832_CR35","unstructured":"Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.,: Attention is all you need. Adv. Neural Inf. Process. Syst., 30, (2017)"},{"issue":"3","key":"3832_CR36","doi-asserted-by":"publisher","first-page":"415","DOI":"10.1007\/s41095-022-0274-8","volume":"8","author":"W Wang","year":"2022","unstructured":"Wang, W., Xie, E., Li, X., Fan, D.-P., Song, K., Liang, D., Tong, L., Luo, P., Shao, L.: Pvt v2: improved baselines with pyramid vision transformer. Comput. Visual Media 8(3), 415\u2013424 (2022)","journal-title":"Comput. Visual Media"},{"key":"3832_CR37","doi-asserted-by":"crossref","unstructured":"Liu, Z., Hu, H., Lin, Y., Yao, Z., Xie, Z., Wei, Y., Ning, J., Cao, Y., Zhang, Z., Dong, L., et\u00a0al.: Swin transformer v2: Scaling up capacity and resolution. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12009\u201312019, (2022)","DOI":"10.1109\/CVPR52688.2022.01170"},{"key":"3832_CR38","doi-asserted-by":"crossref","unstructured":"Wang, W., Xie, E., Li, X., Fan, D-P., Song, K., Liang, D., Lu, T., Luo, P., Shao, L.,: Pyramid vision transformer: a versatile backbone for dense prediction without convolutions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 568\u2013578, (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"3832_CR39","doi-asserted-by":"crossref","unstructured":"Yuan, L., Chen, Y., Wang, T., Yu, W., Shi, Y., Jiang, Z-H., Tay, F.E.H., Feng, J., Yan, S.,: Tokens-to-token vit: training vision transformers from scratch on imagenet. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 558\u2013567, (2021)","DOI":"10.1109\/ICCV48922.2021.00060"},{"key":"3832_CR40","doi-asserted-by":"crossref","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022, (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"3832_CR41","doi-asserted-by":"crossref","unstructured":"Wang, Q., Wu, B., Zhu, P., Li, P., Hu, Q.: Eca-net: Efficient channel attention for deep convolutional neural networks. In: 2020 IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), (2020)","DOI":"10.1109\/CVPR42600.2020.01152"},{"key":"3832_CR42","doi-asserted-by":"crossref","unstructured":"Dai, Y., Gieseke, F., Oehmcke, S., Wu, Y., Barnard, K.: Attentional feature fusion. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 3560\u20133569, (2021)","DOI":"10.1109\/WACV48630.2021.00360"},{"key":"3832_CR43","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2024.106513","volume":"96","author":"Y Chen","year":"2024","unstructured":"Chen, Y., Zhang, X., He, Y., Peng, L., Lei, P., Sun, F.: Mixunet: a lightweight medical image segmentation network capturing multidimensional semantic information. Biomed. Signal Process. Control 96, 106513 (2024)","journal-title":"Biomed. Signal Process. Control"},{"key":"3832_CR44","doi-asserted-by":"publisher","DOI":"10.1007\/s00371-024-03570-5","author":"SG Ali","year":"2024","unstructured":"Ali, S.G., Wang, X., Li, P., Li, H., Yang, P., Jung, Y., Qin, J., Kim, J., Sheng, B.: Egdnet: an efficient glomerular detection network for multiple anomalous pathological feature in glomerulonephritis. Visual Comput (2024). https:\/\/doi.org\/10.1007\/s00371-024-03570-5","journal-title":"Visual Comput"},{"key":"3832_CR45","doi-asserted-by":"publisher","first-page":"880","DOI":"10.1109\/TIP.2021.3136619","volume":"31","author":"A Nazir","year":"2021","unstructured":"Nazir, A., Cheema, M.N., Sheng, B., Li, P., Li, H., Xue, G., Qin, J., Kim, J., Feng, D.D.: Ecsu-net: an embedded clustering sliced u-net coupled with fusing strategy for efficient intervertebral disc segmentation and classification. IEEE Trans. Image Process. 31, 880\u2013893 (2021)","journal-title":"IEEE Trans. Image Process."},{"key":"3832_CR46","unstructured":"Liu, Z., Wang, Y., Vaidya, S., Ruehle, F., Halverson, J., Solja\u010di\u0107, M., Hou, TY., Tegmark, M.,: Kan: Kolmogorov-arnold networks. arXiv preprint arXiv:2404.19756, (2024)"},{"key":"3832_CR47","doi-asserted-by":"publisher","DOI":"10.1016\/j.dib.2019.104863","volume":"28","author":"W Al-Dhabyani","year":"2020","unstructured":"Al-Dhabyani, W., Gomaa, M., Khaled, H., Fahmy, A.: Dataset of breast ultrasound images. Data Brief 28, 104863 (2020)","journal-title":"Data Brief"},{"key":"3832_CR48","doi-asserted-by":"crossref","unstructured":"Bernal, J., S\u00e1nchez, F.J., Fern\u00e1ndez-Esparrach, G., Gil, D., Rodr\u00edguez, C., Vilari\u00f1o, F.: Wm-dova maps for accurate polyp highlighting in colonoscopy: validation vs. saliency maps from physicians. Comput. Med. Imaging Gr. 43, 99\u2013111 (2015)","DOI":"10.1016\/j.compmedimag.2015.02.007"},{"key":"3832_CR49","doi-asserted-by":"crossref","unstructured":"Sanderson, Edward, Matuszewski, Bogdan\u00a0J: Fcn-transformer feature fusion for polyp segmentation. In: Annual Conference on Medical Image Understanding and Analysis, pp. 892\u2013907. Springer, (2022)","DOI":"10.1007\/978-3-031-12053-4_65"},{"key":"3832_CR50","doi-asserted-by":"crossref","unstructured":"Jha, D., Smedsrud, PH., Riegler, MA., Halvorsen, P.l., de\u00a0Lange, T., Johansen, D., Johansen, H.D.,: Kvasir-seg: A segmented polyp dataset. In: MultiMedia Modeling: 26th International Conference, MMM 2020, Daejeon, South Korea, January 5\u20138, 2020, Proceedings, Part II 26, pp. 451\u2013462. Springer, (2020)","DOI":"10.1007\/978-3-030-37734-2_37"},{"key":"3832_CR51","doi-asserted-by":"publisher","first-page":"489","DOI":"10.1016\/j.media.2016.08.008","volume":"35","author":"K Sirinukunwattana","year":"2017","unstructured":"Sirinukunwattana, K., Pluim, J.P.W., Chen, H., Qi, X., Heng, P.-A., Guo, Y.B., Wang, L.Y., Matuszewski, B.J., Bruni, E., Sanchez, U., et al.: The glas challenge contest: gland segmentation in colon histology images. Med. Image Anal. 35, 489\u2013502 (2017)","journal-title":"Med. Image Anal."},{"key":"3832_CR52","doi-asserted-by":"publisher","first-page":"1247","DOI":"10.1038\/s41592-019-0612-7","volume":"16","author":"JC Caicedo","year":"2019","unstructured":"Caicedo, J.C., Goodman, A., Karhohs, K.W., Cimini, B.A., Ackerman, J., Haghighi, M., Heng, C., Becker, T., Doan, M., McQuin, C., et al.: Nucleus segmentation across imaging experiments: the 2018 data science bowl. Nature Methods 16, 1247\u20131253 (2019)","journal-title":"Nature Methods"},{"key":"3832_CR53","doi-asserted-by":"publisher","DOI":"10.1016\/j.neunet.2024.106280","volume":"175","author":"Y Chen","year":"2024","unstructured":"Chen, Y., Zhang, X., Peng, L., He, Y., Sun, F., Sun, H.: Medical image segmentation network based on multi-scale frequency domain filter. Neural Netw 175, 106280 (2024)","journal-title":"Neural Netw"},{"key":"3832_CR54","doi-asserted-by":"crossref","unstructured":"Valanarasu, J.M.J., Patel, V.M.: Unext: Mlp-based rapid medical image segmentation network. arXiv preprint arXiv:2203.04967, (2022)","DOI":"10.1007\/978-3-031-16443-9_3"}],"container-title":["The Visual Computer"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-03832-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00371-025-03832-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00371-025-03832-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,6]],"date-time":"2025-09-06T04:50:05Z","timestamp":1757134205000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00371-025-03832-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,2,14]]},"references-count":54,"journal-issue":{"issue":"10","published-print":{"date-parts":[[2025,8]]}},"alternative-id":["3832"],"URL":"https:\/\/doi.org\/10.1007\/s00371-025-03832-w","relation":{},"ISSN":["0178-2789","1432-2315"],"issn-type":[{"value":"0178-2789","type":"print"},{"value":"1432-2315","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,2,14]]},"assertion":[{"value":"26 January 2025","order":1,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 February 2025","order":2,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest:"}}]}}