{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,31]],"date-time":"2026-03-31T21:34:35Z","timestamp":1774992875707,"version":"3.50.1"},"reference-count":34,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T00:00:00Z","timestamp":1733097600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2025,1]]},"DOI":"10.1007\/s11760-024-03633-z","type":"journal-article","created":{"date-parts":[[2024,12,2]],"date-time":"2024-12-02T13:13:43Z","timestamp":1733145223000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":5,"title":["Uncertainty bidirectional guidance of multi-task mamba network for medical image classification and segmentation"],"prefix":"10.1007","volume":"19","author":[{"given":"Xingao","family":"Wu","sequence":"first","affiliation":[]},{"given":"Gang","family":"Gou","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,12,2]]},"reference":[{"key":"3633_CR1","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/LGRS.2023.3322452","volume":"20","author":"Q Wang","year":"2023","unstructured":"Wang, Q., Yin, C., Song, H., et al.: Utfnet: Uncertainty-guided trustworthy fusion network for rgb-thermal semantic segmentation. IEEE Geosci. Remote Sens. Lett. 20, 1\u20135 (2023). https:\/\/doi.org\/10.1109\/LGRS.2023.3322452","journal-title":"IEEE Geosci. Remote Sens. 
Lett."},{"key":"3633_CR2","doi-asserted-by":"publisher","unstructured":"Ren, K., Zou, K., Liu, X., et\u00a0al.: Uncertainty-informed mutual learning for joint medical image classification and segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 35\u201345. Springer, (2023). https:\/\/doi.org\/10.1007\/978-3-031-43901-8_4","DOI":"10.1007\/978-3-031-43901-8_4"},{"key":"3633_CR3","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1109\/TMI.2021.3079709","volume":"05","author":"X Wang","year":"2021","unstructured":"Wang, X., Jiang, L., Xu, M., et al.: Joint learning of 3d lesion segmentation and classification for explainable Covid-19 diagnosis. IEEE Trans. Med. Imaging 05, 1\u20131 (2021). https:\/\/doi.org\/10.1109\/TMI.2021.3079709","journal-title":"IEEE Trans. Med. Imaging"},{"key":"3633_CR4","doi-asserted-by":"publisher","DOI":"10.1016\/j.cmpb.2023.107921","volume":"243","author":"NG Inan","year":"2024","unstructured":"Inan, N.G., Kocada\u011fl\u0131, O., et al.: Multi-class classification of thyroid nodules from automatic segmented ultrasound images: hybrid resnet based unet convolutional neural network approach. Comput. Methods Programs Biomed. 243, 107921 (2024). https:\/\/doi.org\/10.1016\/j.cmpb.2023.107921","journal-title":"Comput. Methods Programs Biomed."},{"key":"3633_CR5","doi-asserted-by":"publisher","DOI":"10.1109\/ICIP49359.2023.10222770","author":"K Zhang","year":"2023","unstructured":"Zhang, K., Wang, B.: Classification task assisted segmentation network for breast tumor segmentation in ultrasound images. ICIP (2023). 
https:\/\/doi.org\/10.1109\/ICIP49359.2023.10222770","journal-title":"ICIP"},{"key":"3633_CR6","doi-asserted-by":"publisher","first-page":"170","DOI":"10.1007\/978-3-030-51935-3_18","volume-title":"Image and Signal Processing: 9th International Conference, ICISP 2020, Marrakesh, Morocco, June 4\u20136, 2020, Proceedings","author":"I Bakkouri","year":"2020","unstructured":"Bakkouri, I., Afdel, K.: DermoNet: a computer-aided diagnosis system for Dermoscopic disease recognition. In: El Moataz, A., Mammass, D., Mansouri, A., Nouboud, F. (eds.) Image and Signal Processing: 9th International Conference, ICISP 2020, Marrakesh, Morocco, June 4\u20136, 2020, Proceedings, pp. 170\u2013177. Springer International Publishing, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-51935-3_18"},{"key":"3633_CR7","doi-asserted-by":"publisher","DOI":"10.1007\/s11760-024-03240-y","author":"I Bakkouri","year":"2024","unstructured":"Bakkouri, I., Bakkouri, S.: 2mgas-net: multi-level multi-scale gated attentional squeezed network for polyp segmentation. SIViP (2024). https:\/\/doi.org\/10.1007\/s11760-024-03240-y","journal-title":"SIViP"},{"issue":"2","key":"3633_CR8","doi-asserted-by":"publisher","first-page":"2551","DOI":"10.1109\/TPAMI.2022.3171983","volume":"45","author":"Z Han","year":"2023","unstructured":"Han, Z., Zhang, C., et al.: Trusted multi-view classification with dynamic evidential fusion. IEEE Trans. Pattern Anal. Mach. Intell. 45(2), 2551\u20132566 (2023). https:\/\/doi.org\/10.1109\/TPAMI.2022.3171983","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"1","key":"3633_CR9","doi-asserted-by":"publisher","first-page":"6757","DOI":"10.1038\/s41467-023-42444-7","volume":"14","author":"M Wang","year":"2023","unstructured":"Wang, M., Lin, T., Wang, L., Lin, A., Zou, K., Xinxing, X., Zhou, Y., Peng, Y., Meng, Q., Qian, Y., et al.: Uncertainty-inspired open set learning for retinal anomaly identification. Nat. Commun. 14(1), 6757 (2023). 
https:\/\/doi.org\/10.1038\/s41467-023-42444-7","journal-title":"Nat. Commun."},{"key":"3633_CR10","doi-asserted-by":"crossref","unstructured":"Zou, K., Yuan, X., et\u00a0al.: Evidencecap: towards trustworthy medical image segmentation via evidential identity cap. (2023). Preprint at https:\/\/doi.org\/10.21203\/rs.3.rs-2558155\/v1","DOI":"10.21203\/rs.3.rs-2558155\/v1"},{"key":"3633_CR11","unstructured":"Sensoy, M., Kaplan, L., Kandemir, M.: Evidential deep learning to quantify classification uncertainty. In: Proceedings of the 32nd International Conference on Neural Information Processing Systems, NIPS\u201918, page 3183-3193, Red Hook, NY, USA, (2018). Curran Associates Inc"},{"key":"3633_CR12","doi-asserted-by":"publisher","unstructured":"Zou, K., Yuan, Xuedong, et\u00a0al.: Tbrats: Trusted brain tumor segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 503\u2013513. Springer, (2022). https:\/\/doi.org\/10.1007\/978-3-031-16452-1_48","DOI":"10.1007\/978-3-031-16452-1_48"},{"key":"3633_CR13","doi-asserted-by":"publisher","unstructured":"Ruan, J., Xie, M., et\u00a0al.: Ege-unet: an efficient group enhanced unet for skin lesion segmentation. In: International conference on medical image computing and computer-assisted intervention, pages 481\u2013490. Springer, (2023). https:\/\/doi.org\/10.1007\/978-3-031-43901-8_46","DOI":"10.1007\/978-3-031-43901-8_46"},{"key":"3633_CR14","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28","author":"O Ronneberger","year":"2015","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: convolutional networks for biomedical image segmentation. Lecture Notes Comput. Sci. (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28","journal-title":"Lecture Notes Comput. Sci."},{"key":"3633_CR15","doi-asserted-by":"publisher","unstructured":"Ruan, J., Xiang, S., et\u00a0al.: Malunet: A multi-attention and light-weight unet for skin lesion segmentation. 
In: 2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM), pages 1150\u20131156. IEEE, (2022). https:\/\/doi.org\/10.1109\/BIBM55620.2022.9995040","DOI":"10.1109\/BIBM55620.2022.9995040"},{"key":"3633_CR16","unstructured":"Dosovitskiy, A., Beyer, L., et\u00a0al.: An image is worth 16x16 words: Transformers for image recognition at scale, (2021). https:\/\/arxiv.org\/abs\/2010.11929"},{"issue":"9","key":"3633_CR17","doi-asserted-by":"publisher","first-page":"2722","DOI":"10.1109\/TBME.2023.3262842","volume":"70","author":"F Chen","year":"2023","unstructured":"Chen, F., Han, H., Wan, P., et al.: Joint segmentation and differential diagnosis of thyroid nodule in contrast-enhanced ultrasound images. IEEE Trans. Biomed. Eng. 70(9), 2722\u20132732 (2023). https:\/\/doi.org\/10.1109\/TBME.2023.3262842","journal-title":"IEEE Trans. Biomed. Eng."},{"key":"3633_CR18","doi-asserted-by":"publisher","unstructured":"Liu, Z., Lin, Y., Cao, Y., et\u00a0al.: Swin transformer: Hierarchical vision transformer using shifted windows. In: 2021 IEEE\/CVF International Conference on Computer Vision (ICCV), pages 9992\u201310002, Los Alamitos, CA, USA, (2021). IEEE Computer Society. https:\/\/doi.org\/10.1109\/ICCV48922.2021.00986","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"3633_CR19","unstructured":"Liu, Y., Tian, Yunjie, et\u00a0al.: Vmamba: Visual state space model, (2024). https:\/\/arxiv.org\/abs\/2401.10166"},{"key":"3633_CR20","unstructured":"Gu, A., Dao, T.: Mamba: Linear-time sequence modeling with selective state spaces, (2024). https:\/\/arxiv.org\/abs\/2312.00752"},{"key":"3633_CR21","unstructured":"Ma, J., Li, F., Wang, B.: U-mamba: Enhancing long-range dependency for biomedical image segmentation, (2024). https:\/\/arxiv.org\/abs\/2401.04722"},{"key":"3633_CR22","unstructured":"Ruan, J., Xiang, S.: Vm-unet: Vision mamba unet for medical image segmentation, (2024). 
https:\/\/arxiv.org\/abs\/2402.02491"},{"key":"3633_CR23","unstructured":"Laurent, S.: Rigid-motion scattering for image classification. Ph. D. thesis section, 6(2), (2014)"},{"key":"3633_CR24","unstructured":"Ba, J.L., Kiros, J.R., Hinton, G.E.: Layer normalization, (2016). https:\/\/arxiv.org\/abs\/1607.06450"},{"key":"3633_CR25","doi-asserted-by":"publisher","first-page":"3","DOI":"10.1016\/j.neunet.2017.12.012","volume":"107","author":"S Elfwing","year":"2018","unstructured":"Elfwing, S., Uchibe, E., Doya, K.: Sigmoid-weighted linear units for neural network function approximation in reinforcement learning. Neural Netw. 107, 3\u201311 (2018). https:\/\/doi.org\/10.1016\/j.neunet.2017.12.012","journal-title":"Neural Netw."},{"key":"3633_CR26","doi-asserted-by":"crossref","unstructured":"J\u00f8sang, A.: Subjective logic, volume\u00a03. Springer, (2016)","DOI":"10.1007\/978-3-319-42337-1"},{"key":"3633_CR27","doi-asserted-by":"publisher","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770\u2013778, (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"3633_CR28","doi-asserted-by":"publisher","DOI":"10.1016\/j.dib.2019.104863","volume":"28","author":"W Al-Dhabyani","year":"2020","unstructured":"Al-Dhabyani, W., Gomaa, M., et al.: Dataset of breast ultrasound images. Data Brief 28, 104863 (2020). https:\/\/doi.org\/10.1016\/j.dib.2019.104863","journal-title":"Data Brief"},{"key":"3633_CR29","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2019.101570","volume":"59","author":"JI Orlando","year":"2020","unstructured":"Orlando, J.I., Fu, H., et al.: Refuge challenge: a unified framework for evaluating automated methods for glaucoma assessment from fundus photographs. Med. Image Anal. 59, 101570 (2020). https:\/\/doi.org\/10.1016\/j.media.2019.101570","journal-title":"Med. 
Image Anal."},{"key":"3633_CR30","unstructured":"Yue, Y., Li, Z.: Medmamba: Vision mamba for medical image classification. arXiv preprint arXiv:2403.03849, (2024). https:\/\/arxiv.org\/abs\/2403.03849"},{"issue":"12","key":"3633_CR31","doi-asserted-by":"publisher","first-page":"3315","DOI":"10.1109\/TMI.2021.3083586","volume":"40","author":"M Zhu","year":"2021","unstructured":"Zhu, M., Chen, Z., Yuan, Y.: Dsi-net: deep synergistic interaction network for joint classification and segmentation with endoscope images. IEEE Trans. Med. Imaging 40(12), 3315\u20133325 (2021). https:\/\/doi.org\/10.1109\/TMI.2021.3083586","journal-title":"IEEE Trans. Med. Imaging"},{"key":"3633_CR32","doi-asserted-by":"publisher","unstructured":"Yang, K., Suzuki, A., et\u00a0al.: Multi-task learning with consistent prediction for efficient breast ultrasound tumor detection. In: 2022 IEEE International Conference on Bioinformatics and Biomedicine (BIBM), pages 3201\u20133208. IEEE, (2022). https:\/\/doi.org\/10.1109\/BIBM55620.2022.9995444","DOI":"10.1109\/BIBM55620.2022.9995444"},{"key":"3633_CR33","doi-asserted-by":"publisher","unstructured":"Xu, M., Huang, K., Qi, X.: Multi-task learning with context-oriented self-attention for breast ultrasound image classification and segmentation. In: 2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI), pages 1\u20135. IEEE, (2022). https:\/\/doi.org\/10.1109\/ISBI52829.2022.9761685","DOI":"10.1109\/ISBI52829.2022.9761685"},{"key":"3633_CR34","doi-asserted-by":"publisher","unstructured":"Liu, S., Deng, W.: Very deep convolutional neural network based image classification using small training sample size. In: 2015 3rd IAPR Asian conference on pattern recognition (ACPR), pages 730\u2013734. IEEE, (2015). 
https:\/\/doi.org\/10.1109\/ACPR.2015.7486599","DOI":"10.1109\/ACPR.2015.7486599"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-024-03633-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-024-03633-z\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-024-03633-z.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,1,28]],"date-time":"2025-01-28T17:50:52Z","timestamp":1738086652000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-024-03633-z"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,12,2]]},"references-count":34,"journal-issue":{"issue":"1","published-print":{"date-parts":[[2025,1]]}},"alternative-id":["3633"],"URL":"https:\/\/doi.org\/10.1007\/s11760-024-03633-z","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"value":"1863-1703","type":"print"},{"value":"1863-1711","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,12,2]]},"assertion":[{"value":"26 July 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"28 September 2024","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"14 October 2024","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"2 December 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article 
History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors did not receive support from any organization for the submitted work.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}}],"article-number":"29"}}