{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,11]],"date-time":"2026-01-11T06:53:39Z","timestamp":1768114419480,"version":"3.49.0"},"reference-count":51,"publisher":"Springer Science and Business Media LLC","issue":"12","license":[{"start":{"date-parts":[[2024,9,3]],"date-time":"2024-09-03T00:00:00Z","timestamp":1725321600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,3]],"date-time":"2024-09-03T00:00:00Z","timestamp":1725321600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"the Cross Project of Medical and Engineering in Henan University","award":["No. YGJC2022002"],"award-info":[{"award-number":["No. YGJC2022002"]}]},{"DOI":"10.13039\/501100001809","name":"the National Natural Science Foundation of China","doi-asserted-by":"crossref","award":["No. 82100052"],"award-info":[{"award-number":["No. 82100052"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"crossref"}]},{"name":"the Key Scientific and Technological Projects in Henan Province, China","award":["No. 232102311012"],"award-info":[{"award-number":["No. 
232102311012"]}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2024,12]]},"DOI":"10.1007\/s11760-024-03530-5","type":"journal-article","created":{"date-parts":[[2024,9,3]],"date-time":"2024-09-03T06:02:35Z","timestamp":1725343355000},"page":"9067-9079","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":1,"title":["CascadeMedSeg: integrating pyramid vision transformer with multi-scale fusion for precise medical image segmentation"],"prefix":"10.1007","volume":"18","author":[{"given":"Junwei","family":"Li","sequence":"first","affiliation":[]},{"given":"Shengfeng","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Shijie","family":"Li","sequence":"additional","affiliation":[]},{"given":"Ruixue","family":"Xia","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,3]]},"reference":[{"issue":"2","key":"3530_CR1","doi-asserted-by":"publisher","first-page":"699","DOI":"10.1109\/TMI.2020.3035253","volume":"40","author":"R Gu","year":"2021","unstructured":"Gu, R., Wang, G., Song, T., Huang, R., Aertsen, M., Deprest, J., Ourselin, S., Vercauteren, T., Zhang, S.: Ca-net: Comprehensive attention convolutional neural networks for explainable medical image segmentation. IEEE Transactions on Medical Imaging 40(2), 699\u2013711 (2021). https:\/\/doi.org\/10.1109\/TMI.2020.3035253","journal-title":"IEEE Transactions on Medical Imaging"},{"key":"3530_CR2","doi-asserted-by":"publisher","DOI":"10.1016\/j.compmedimag.2023.102241","volume":"107","author":"S Bhandary","year":"2023","unstructured":"Bhandary, S., Kuhn, D., Babaiee, Z., Fechter, T., Benndorf, M., Zamboglou, C., Grosu, A.-L., Grosu, R.: Investigation and benchmarking of u-nets on prostate segmentation tasks. Computerized Medical Imaging and Graphics 107, 102241 (2023). 
https:\/\/doi.org\/10.1016\/j.compmedimag.2023.102241","journal-title":"Computerized Medical Imaging and Graphics"},{"issue":"4","key":"3530_CR3","doi-asserted-by":"publisher","first-page":"656","DOI":"10.1016\/j.bbe.2023.09.001","volume":"43","author":"G Lin","year":"2023","unstructured":"Lin, G., Chen, M., Tan, M., Chen, L., Chen, J.: A dual-stage transformer and mlp-based network for breast ultrasound image segmentation. Biocybernetics and Biomedical Engineering 43(4), 656\u2013671 (2023). https:\/\/doi.org\/10.1016\/j.bbe.2023.09.001","journal-title":"Biocybernetics and Biomedical Engineering"},{"issue":"17","key":"3530_CR4","doi-asserted-by":"publisher","first-page":"19990","DOI":"10.1007\/s10489-023-04570-z","volume":"53","author":"Z Yu","year":"2023","unstructured":"Yu, Z., Lee, F., Chen, Q.: Hct-net: hybrid cnn-transformer model based on a neural architecture search network for medical image segmentation. Applied Intelligence 53(17), 19990\u201320006 (2023). https:\/\/doi.org\/10.1007\/s10489-023-04570-z","journal-title":"Applied Intelligence"},{"issue":"1","key":"3530_CR5","doi-asserted-by":"publisher","first-page":"525","DOI":"10.1007\/s11760-023-02770-1","volume":"18","author":"H Wu","year":"2024","unstructured":"Wu, H., Zhang, Z., Zhang, Y., Sun, B., Zhang, X.: Acx-unet: a multi-scale lung parenchyma segmentation study with improved fusion of skip connection and circular cross-features extraction. Signal, Image and Video Processing 18(1), 525\u2013533 (2024). https:\/\/doi.org\/10.1007\/s11760-023-02770-1","journal-title":"Signal, Image and Video Processing"},{"issue":"8","key":"3530_CR6","doi-asserted-by":"publisher","first-page":"3731","DOI":"10.1109\/JBHI.2022.3227540","volume":"27","author":"MS Alam","year":"2022","unstructured":"Alam, M.S., Wang, D., Liao, Q., Sowmya, A.: A multi-scale context aware attention model for medical image segmentation. IEEE Journal of Biomedical and Health Informatics 27(8), 3731\u20133739 (2022). 
https:\/\/doi.org\/10.1109\/JBHI.2022.3227540","journal-title":"IEEE Journal of Biomedical and Health Informatics"},{"issue":"1","key":"3530_CR7","doi-asserted-by":"publisher","first-page":"121","DOI":"10.1109\/JBHI.2020.2986926","volume":"25","author":"A Sinha","year":"2020","unstructured":"Sinha, A., Dolz, J.: Multi-scale self-guided attention for medical image segmentation. IEEE Journal of Biomedical and Health Informatics 25(1), 121\u2013130 (2020). https:\/\/doi.org\/10.1109\/JBHI.2020.2986926","journal-title":"IEEE Journal of Biomedical and Health Informatics"},{"key":"3530_CR8","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2022.102478","volume":"80","author":"R Huang","year":"2022","unstructured":"Huang, R., Lin, M., Dou, H., Lin, Z., Ying, Q., Jia, X., Xu, W., Mei, Z., Yang, X., Dong, Y., et al.: Boundary-rendering network for breast lesion segmentation in ultrasound images. Medical Image Analysis 80, 102478 (2022). https:\/\/doi.org\/10.1016\/j.media.2022.102478","journal-title":"Medical Image Analysis"},{"key":"3530_CR9","doi-asserted-by":"publisher","unstructured":"Jha, D., Riegler, M.A., Johansen, D., Halvorsen, P., Johansen, H.D.: Doubleu-net: A deep convolutional neural network for medical image segmentation. In: International Symposium on Computer-Based Medical Systems (CBMS), pp. 558\u2013564 (2020). IEEE. https:\/\/doi.org\/10.1109\/CBMS49503.2020.00111","DOI":"10.1109\/CBMS49503.2020.00111"},{"key":"3530_CR10","doi-asserted-by":"publisher","unstructured":"Rahman, M.M., Marculescu, R.: Medical image segmentation via cascaded attention decoding. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision (WACV), pp. 6222\u20136231 (2023). 
https:\/\/doi.org\/10.1109\/WACV56688.2023.00616","DOI":"10.1109\/WACV56688.2023.00616"},{"key":"3530_CR11","doi-asserted-by":"publisher","DOI":"10.1016\/j.compmedimag.2019.05.004","volume":"76","author":"R Hemelings","year":"2019","unstructured":"Hemelings, R., Elen, B., Stalmans, I., Van Keer, K., De Boever, P., Blaschko, M.B.: Artery-vein segmentation in fundus images using a fully convolutional network. Computerized Medical Imaging and Graphics 76, 101636 (2019). https:\/\/doi.org\/10.1016\/j.compmedimag.2019.05.004","journal-title":"Computerized Medical Imaging and Graphics"},{"issue":"2","key":"3530_CR12","doi-asserted-by":"publisher","first-page":"476","DOI":"10.1109\/TMI.2021.3116087","volume":"41","author":"Z Ning","year":"2021","unstructured":"Ning, Z., Zhong, S., Feng, Q., Chen, W., Zhang, Y.: Smu-net: Saliency-guided morphology-aware u-net for breast lesion segmentation in ultrasound image. IEEE Transactions on Medical Imaging 41(2), 476\u2013490 (2021). https:\/\/doi.org\/10.1109\/TMI.2021.3116087","journal-title":"IEEE Transactions on Medical Imaging"},{"key":"3530_CR13","doi-asserted-by":"publisher","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: Convolutional networks for biomedical image segmentation. In: Medical Image Computing and Computer-Assisted Intervention, pp. 234\u2013241 (2015). Springer. https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"3530_CR14","doi-asserted-by":"publisher","unstructured":"Zhou, Z., Rahman\u00a0Siddiquee, M.M., Tajbakhsh, N., Liang, J.: Unet++: A nested u-net architecture for medical image segmentation. In: Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support, pp. 3\u201311 (2018). Springer. 
https:\/\/doi.org\/10.1007\/978-3-030-00889-5_1","DOI":"10.1007\/978-3-030-00889-5_1"},{"key":"3530_CR15","doi-asserted-by":"publisher","unstructured":"Xiao, X., Lian, S., Luo, Z., Li, S.: Weighted res-unet for high-quality retina vessel segmentation. In: International Conference on Information Technology in Medicine and Education (ITME), pp. 327\u2013331 (2018). IEEE. https:\/\/doi.org\/10.1109\/ITME.2018.00080","DOI":"10.1109\/ITME.2018.00080"},{"key":"3530_CR16","doi-asserted-by":"publisher","unstructured":"Huang, H., Lin, L., Tong, R., Hu, H., Zhang, Q., Iwamoto, Y., Han, X., Chen, Y.-W., Wu, J.: Unet 3+: A full-scale connected unet for medical image segmentation. In: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 1055\u20131059 (2020). IEEE. https:\/\/doi.org\/10.1109\/icassp40776.2020.9053405","DOI":"10.1109\/icassp40776.2020.9053405"},{"key":"3530_CR17","unstructured":"Oktay, O., Schlemper, J., Folgoc, L.L., Lee, M., Heinrich, M., Misawa, K., Mori, K., McDonagh, S., Hammerla, N.Y., Kainz, B., et al.: Attention u-net: Learning where to look for the pancreas. arXiv abs\/1804. 03999 (2018)"},{"key":"3530_CR18","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 7132\u20137141 (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"issue":"10s","key":"3530_CR19","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1145\/3505244","volume":"54","author":"S Khan","year":"2022","unstructured":"Khan, S., Naseer, M., Hayat, M., Zamir, S.W., Khan, F.S., Shah, M.: Transformers in vision: A survey. ACM Computing Surveys (CSUR) 54(10s), 1\u201341 (2022). 
https:\/\/doi.org\/10.1145\/3505244","journal-title":"ACM Computing Surveys (CSUR)"},{"key":"3530_CR20","unstructured":"Wu, B., Xu, C., Dai, X., Wan, A., Zhang, P., Yan, Z., Tomizuka, M., Gonzalez, J., Keutzer, K., Vajda, P.: Visual transformers: Token-based image representation and processing for computer vision. arXiv abs:2006\/03677 (2020)"},{"issue":"5","key":"3530_CR21","doi-asserted-by":"publisher","first-page":"501","DOI":"10.1049\/sil2.12114","volume":"16","author":"J Xie","year":"2022","unstructured":"Xie, J., Zhu, R., Wu, Z., Ouyang, J.: Ffunet: A novel feature fusion makes strong decoder for medical image segmentation. IET Signal Processing 16(5), 501\u2013514 (2022). https:\/\/doi.org\/10.1049\/sil2.12114","journal-title":"IET Signal Processing"},{"key":"3530_CR22","doi-asserted-by":"publisher","unstructured":"Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 10012\u201310022 (2021). https:\/\/doi.org\/10.1109\/ICCV48922.2021.00986","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"3530_CR23","doi-asserted-by":"publisher","unstructured":"Wang, W., Xie, E., Li, X., Fan, D.-P., Song, K., Liang, D., Lu, T., Luo, P., Shao, L.: Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 568\u2013578 (2021). https:\/\/doi.org\/10.1109\/ICCV48922.2021.00061","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"3530_CR24","doi-asserted-by":"publisher","unstructured":"Cheng, B., Misra, I., Schwing, A.G., Kirillov, A., Girdhar, R.: Masked-attention mask transformer for universal image segmentation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 1290\u20131299 (2022). 
https:\/\/doi.org\/10.1109\/CVPR52688.2022.00135","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"3530_CR25","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv abs\/2010.11929. (2020)"},{"issue":"3","key":"3530_CR26","doi-asserted-by":"publisher","first-page":"415","DOI":"10.1007\/s41095-022-0274-8","volume":"8","author":"W Wang","year":"2022","unstructured":"Wang, W., Xie, E., Li, X., Fan, D.-P., Song, K., Liang, D., Lu, T., Luo, P., Shao, L.: Pvt v2: Improved baselines with pyramid vision transformer. Computational Visual Media 8(3), 415\u2013424 (2022). https:\/\/doi.org\/10.1007\/s41095-022-0274-8","journal-title":"Computational Visual Media"},{"key":"3530_CR27","doi-asserted-by":"publisher","unstructured":"Wang, J., Huang, Q., Tang, F., Meng, J., Su, J., Song, S.: Stepwise feature fusion: Local guides global. In: Medical Image Computing and Computer Assisted Intervention, pp. 110\u2013120 (2022). Springer. https:\/\/doi.org\/10.1007\/978-3-031-16437-8_11","DOI":"10.1007\/978-3-031-16437-8_11"},{"issue":"12","key":"3530_CR28","doi-asserted-by":"publisher","first-page":"2481","DOI":"10.1109\/TPAMI.2016.2644615","volume":"39","author":"V Badrinarayanan","year":"2017","unstructured":"Badrinarayanan, V., Kendall, A., Cipolla, R.: Segnet: A deep convolutional encoder-decoder architecture for image segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence 39(12), 2481\u20132495 (2017). https:\/\/doi.org\/10.1109\/TPAMI.2016.2644615","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence"},{"key":"3530_CR29","unstructured":"Howard, A.G., Zhu, M., Chen, B., Kalenichenko, D., Wang, W., Weyand, T., Andreetto, M., Adam, H.: Mobilenets: Efficient convolutional neural networks for mobile vision applications. 
arXiv abs1704\/04861 (2017)"},{"key":"3530_CR30","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2022.106173","volume":"150","author":"W Zhang","year":"2022","unstructured":"Zhang, W., Fu, C., Zheng, Y., Zhang, F., Zhao, Y., Sham, C.-W.: Hsnet: A hybrid semantic network for polyp segmentation. Computers in Biology and Medicine 150, 106173 (2022). https:\/\/doi.org\/10.1016\/j.compbiomed.2022.106173","journal-title":"Computers in Biology and Medicine"},{"key":"3530_CR31","doi-asserted-by":"publisher","unstructured":"Khalifa, A.F., Badr, E.: Deep learning for image segmentation: A focus on medical imaging. Comput. Mater. Contin 75(1), 1995\u20132024 (2023). https:\/\/doi.org\/10.32604\/cmc.2023.035888","DOI":"10.32604\/cmc.2023.035888"},{"key":"3530_CR32","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016)","DOI":"10.1109\/CVPR.2016.90"},{"key":"3530_CR33","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2023.106626","volume":"154","author":"Q Xu","year":"2023","unstructured":"Xu, Q., Ma, Z., Na, H., Duan, W.: Dcsau-net: A deeper and more compact split-attention u-net for medical image segmentation. Computers in Biology and Medicine 154, 106626 (2023). https:\/\/doi.org\/10.1016\/j.compbiomed.2023.106626","journal-title":"Computers in Biology and Medicine"},{"key":"3530_CR34","unstructured":"Chen, J., Lu, Y., Yu, Q., Luo, X., Adeli, E., Wang, Y., Lu, L., Yuille, A.L., Zhou, Y.: Transunet: Transformers make strong encoders for medical image segmentation. arXiv abs 2102\/04306 (2021)"},{"key":"3530_CR35","doi-asserted-by":"publisher","unstructured":"Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q., Wang, M.: Swin-unet: Unet-like pure transformer for medical image segmentation. In: European Conference on Computer Vision, pp. 205\u2013218 (2022). 
Springer. https:\/\/doi.org\/10.1007\/978-3-031-25066-8_9","DOI":"10.1007\/978-3-031-25066-8_9"},{"key":"3530_CR36","doi-asserted-by":"publisher","unstructured":"Wang, H., Cao, P., Wang, J., Zaiane, O.R.: Uctransnet: rethinking the skip connections in u-net from a channel-wise perspective with transformer. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, pp. 2441\u20132449 (2022). https:\/\/doi.org\/10.1609\/aaai.v36i3.20144","DOI":"10.1609\/aaai.v36i3.20144"},{"key":"3530_CR37","doi-asserted-by":"publisher","unstructured":"Yi, L., Dong, Z., Xiao, F., Yufan, C., Kwang-Ting, C., Hao, C.: Rethinking boundary detection in deep learning models for medical image segmentation. In: Information Processing in Medical Imaging., pp. 730\u2013742 (2023). Springer. https:\/\/doi.org\/10.1007\/978-3-031-34048-2_56","DOI":"10.1007\/978-3-031-34048-2_56"},{"key":"3530_CR38","doi-asserted-by":"publisher","unstructured":"Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: European Conference on Computer Vision, pp. 213\u2013229 (2020). Springer. https:\/\/doi.org\/10.1007\/978-3-030-58452-8_13","DOI":"10.1007\/978-3-030-58452-8_13"},{"key":"3530_CR39","unstructured":"Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., J\u00e9gou, H.: Training data-efficient image transformers & distillation through attention. In: International Conference on Machine Learning, pp. 7358\u20137367 (2021)"},{"key":"3530_CR40","doi-asserted-by":"publisher","first-page":"461","DOI":"10.1109\/TIP.2019.2919937","volume":"29","author":"S Zhou","year":"2019","unstructured":"Zhou, S., Nie, D., Adeli, E., Yin, J., Lian, J., Shen, D.: High-resolution encoder-decoder networks for low-contrast medical image segmentation. IEEE Transactions on Image Processing 29, 461\u2013475 (2019). 
https:\/\/doi.org\/10.1109\/TIP.2019.2919937","journal-title":"IEEE Transactions on Image Processing"},{"key":"3530_CR41","doi-asserted-by":"publisher","unstructured":"Lin, T.-Y., Doll\u00e1r, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 2117\u20132125 (2017). https:\/\/doi.org\/10.1109\/CVPR.2017.106","DOI":"10.1109\/CVPR.2017.106"},{"key":"3530_CR42","doi-asserted-by":"publisher","unstructured":"Wang, Q., Wu, B., Zhu, P., Li, P., Zuo, W., Hu, Q.: Eca-net: Efficient channel attention for deep convolutional neural networks. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 11534\u201311542 (2020). https:\/\/doi.org\/10.48550\/arXiv.1910.03151","DOI":"10.48550\/arXiv.1910.03151"},{"key":"3530_CR43","doi-asserted-by":"publisher","unstructured":"Lin, W., Wu, Z., Chen, J., Huang, J., Jin, L.: Scale-aware modulation meet transformer. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision (ICCV), pp. 5992\u20136003 (2023). https:\/\/doi.org\/10.1109\/ICCV51070.2023.00553","DOI":"10.1109\/ICCV51070.2023.00553"},{"key":"3530_CR44","doi-asserted-by":"publisher","unstructured":"Tang, F., Xu, Z., Huang, Q., Wang, J., Hou, X., Su, J., Liu, J.: Duat: Dual-aggregation transformer network for medical image segmentation. In: Chinese Conference on Pattern Recognition and Computer Vision (PRCV), pp. 343\u2013356 (2023). Springer. https:\/\/doi.org\/10.1007\/978-981-99-8469-5_27","DOI":"10.1007\/978-981-99-8469-5_27"},{"key":"3530_CR45","doi-asserted-by":"publisher","unstructured":"Jha, D., Smedsrud, P.H., Riegler, M.A., Halvorsen, P., De\u00a0Lange, T., Johansen, D., Johansen, H.D.: Kvasir-seg: A segmented polyp dataset. In: MultiMedia Modeling: International Conference, pp. 451\u2013462 (2020). Springer. 
https:\/\/doi.org\/10.1007\/978-3-030-37734-2_37","DOI":"10.1007\/978-3-030-37734-2_37"},{"key":"3530_CR46","doi-asserted-by":"publisher","unstructured":"Bernal, J., S\u00e1nchez, F.J., Fern\u00e1ndez-Esparrach, G., Gil, D., Rodr\u00edguez, C., Vilari\u00f1o, F.: Wm-dova maps for accurate polyp highlighting in colonoscopy: Validation vs. saliency maps from physicians. Computerized Medical Imaging and Graphics 43, 99\u2013111 (2015). https:\/\/doi.org\/10.1016\/j.compmedimag.2015.02.007","DOI":"10.1016\/j.compmedimag.2015.02.007"},{"key":"3530_CR47","doi-asserted-by":"publisher","unstructured":"Codella, N.C., Gutman, D., Celebi, M.E., Helba, B., Marchetti, M.A., Dusza, S.W., Kalloo, A., Liopyris, K., Mishra, N., Kittler, H., et al.: Skin lesion analysis toward melanoma detection: A challenge at the 2017 international symposium on biomedical imaging (isbi), hosted by the international skin imaging collaboration (isic). In: International Symposium on Biomedical Imaging, pp. 168\u2013172 (2018). IEEE. https:\/\/doi.org\/10.1109\/ISBI.2018.8363547","DOI":"10.1109\/ISBI.2018.8363547"},{"issue":"1","key":"3530_CR48","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1038\/sdata.2018.161","volume":"5","author":"P Tschandl","year":"2018","unstructured":"Tschandl, P., Rosendahl, C., Kittler, H.: The ham10000 dataset, a large collection of multi-source dermatoscopic images of common pigmented skin lesions. Scientific Data 5(1), 1\u20139 (2018). https:\/\/doi.org\/10.1038\/sdata.2018.161","journal-title":"Scientific Data"},{"key":"3530_CR49","doi-asserted-by":"publisher","DOI":"10.1016\/j.dib.2019.104863","volume":"28","author":"W Al-Dhabyani","year":"2020","unstructured":"Al-Dhabyani, W., Gomaa, M., Khaled, H., Fahmy, A.: Dataset of breast ultrasound images. Data in Brief 28, 104863 (2020). 
https:\/\/doi.org\/10.1016\/j.dib.2019.104863","journal-title":"Data in Brief"},{"issue":"11","key":"3530_CR50","doi-asserted-by":"publisher","first-page":"2514","DOI":"10.1109\/TMI.2018.2837502","volume":"37","author":"O Bernard","year":"2018","unstructured":"Bernard, O., Lalande, A., Zotti, C., Cervenansky, F., Yang, X., Heng, P.-A., Cetin, I., Lekadir, K., Camara, O., Ballester, M.A.G., et al.: Deep learning techniques for automatic mri cardiac multi-structures segmentation and diagnosis: is the problem solved? IEEE Transactions on Medical Imaging 37(11), 2514\u20132525 (2018). https:\/\/doi.org\/10.1109\/TMI.2018.2837502","journal-title":"IEEE Transactions on Medical Imaging"},{"key":"3530_CR51","doi-asserted-by":"publisher","unstructured":"Wang, H., Xie, S., Lin, L., Iwamoto, Y., Han, X.-H., Chen, Y.-W., Tong, R.: Mixed transformer u-net for medical image segmentation. In: IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2390\u20132394 (2022). IEEE. 
https:\/\/doi.org\/10.1109\/ICASSP43922.2022.9746172","DOI":"10.1109\/ICASSP43922.2022.9746172"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-024-03530-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-024-03530-5\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-024-03530-5.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,11,4]],"date-time":"2024-11-04T07:22:42Z","timestamp":1730704962000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-024-03530-5"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,3]]},"references-count":51,"journal-issue":{"issue":"12","published-print":{"date-parts":[[2024,12]]}},"alternative-id":["3530"],"URL":"https:\/\/doi.org\/10.1007\/s11760-024-03530-5","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"value":"1863-1703","type":"print"},{"value":"1863-1711","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,9,3]]},"assertion":[{"value":"9 July 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"5 August 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"20 August 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"3 September 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors have no Conflict of interest to declare that are relevant to the content of this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}}]}}