{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,5,1]],"date-time":"2026-05-01T03:29:35Z","timestamp":1777606175707,"version":"3.51.4"},"reference-count":42,"publisher":"Springer Science and Business Media LLC","issue":"5","license":[{"start":{"date-parts":[[2024,9,19]],"date-time":"2024-09-19T00:00:00Z","timestamp":1726704000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,9,19]],"date-time":"2024-09-19T00:00:00Z","timestamp":1726704000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Zhejiang Provincial Natural Science Foundation of China","award":["LQ23F020021"],"award-info":[{"award-number":["LQ23F020021"]}]},{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["62406288"],"award-info":[{"award-number":["62406288"]}],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Multimedia Systems"],"published-print":{"date-parts":[[2024,10]]},"DOI":"10.1007\/s00530-024-01498-3","type":"journal-article","created":{"date-parts":[[2024,9,24]],"date-time":"2024-09-24T06:03:00Z","timestamp":1727157780000},"update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["Dual triple attention guided CNN-VMamba for medical image 
segmentation"],"prefix":"10.1007","volume":"30","author":[{"given":"Qiaohong","family":"Chen","sequence":"first","affiliation":[]},{"given":"Jing","family":"Li","sequence":"additional","affiliation":[]},{"given":"Xian","family":"Fang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,9,19]]},"reference":[{"issue":"10","key":"1498_CR1","doi-asserted-by":"publisher","first-page":"6695","DOI":"10.1109\/TPAMI.2021.3100536","volume":"44","author":"J Ma","year":"2022","unstructured":"Ma, J., Zhang, Y., Gu, S., Zhu, C., Ge, C., Zhang, Y., An, X., Wang, C., Wang, Q., Liu, X., Cao, S., Zhang, Q., Liu, S., Wang, Y., Li, Y., He, J., Yang, X.: Abdomenct-1k: Is abdominal organ segmentation a solved problem? IEEE Trans. Pattern Anal. Mach. Intell. 44(10), 6695\u20136714 (2022). https:\/\/doi.org\/10.1109\/TPAMI.2021.3100536","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"1498_CR2","doi-asserted-by":"crossref","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: convolutional networks for biomedical image segmentation. In: Medical Image Computing and Computer-Assisted Intervention\u2013MICCAI 2015: 18th International Conference, Munich, Germany, pp. 234\u2013241. Springer (2015)","DOI":"10.1007\/978-3-319-24574-4_28"},{"issue":"2","key":"1498_CR3","doi-asserted-by":"publisher","first-page":"203","DOI":"10.1038\/s41592-020-01008-z","volume":"18","author":"F Isensee","year":"2021","unstructured":"Isensee, F., Jaeger, P.F., Kohl, S.A., Petersen, J., Maier-Hein, K.H.: nnu-net: a self-configuring method for deep learning-based biomedical image segmentation. Nat. Methods 18(2), 203\u2013211 (2021)","journal-title":"Nat. 
Methods"},{"key":"1498_CR4","doi-asserted-by":"publisher","first-page":"201","DOI":"10.1007\/978-981-99-4284-8_16","volume-title":"Advanced Computational and Communication Paradigms","author":"R Agarwal","year":"2023","unstructured":"Agarwal, R., Ghosal, P., Murmu, N., Nandi, D.: Spiking neural network in computer vision: techniques, tools and trends. In: Borah, S., Gandhi, T.K., Piuri, V. (eds.) Advanced Computational and Communication Paradigms, pp. 201\u2013209. Springer, Singapore (2023)"},{"key":"1498_CR5","doi-asserted-by":"publisher","DOI":"10.1016\/j.bspc.2023.105890","volume":"90","author":"B Li","year":"2024","unstructured":"Li, B., Wang, Y., Xu, Y., Wu, C.: Dsst: a dual student model guided student-teacher framework for semi-supervised medical image segmentation. Biomed. Signal Process. Control 90, 105890 (2024). https:\/\/doi.org\/10.1016\/j.bspc.2023.105890","journal-title":"Biomed. Signal Process. Control"},{"issue":"1","key":"1498_CR6","doi-asserted-by":"publisher","first-page":"357","DOI":"10.1109\/TMI.2020.3027341","volume":"40","author":"H Wu","year":"2021","unstructured":"Wu, H., Pan, J., Li, Z., Wen, Z., Qin, J.: Automated skin lesion segmentation via an adaptive dual attention module. IEEE Trans. Med. Imaging 40(1), 357\u2013370 (2021). https:\/\/doi.org\/10.1109\/TMI.2020.3027341","journal-title":"IEEE Trans. Med. Imaging"},{"key":"1498_CR7","doi-asserted-by":"crossref","unstructured":"Peng, Z., Huang, W., Gu, S., Xie, L., Wang, Y., Jiao, J., Ye, Q.: Conformer: local features coupling global representations for visual recognition. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 367\u2013376 (2021)","DOI":"10.1109\/ICCV48922.2021.00042"},{"key":"1498_CR8","doi-asserted-by":"crossref","unstructured":"Guo, J., Han, K., Wu, H., Tang, Y., Chen, X., Wang, Y., Xu, C.: Cmt: convolutional neural networks meet vision transformers. 
In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 12175\u201312185 (2022)","DOI":"10.1109\/CVPR52688.2022.01186"},{"key":"1498_CR9","unstructured":"Ma, J., Li, F., Wang, B.: U-mamba: Enhancing long-range dependency for biomedical image segmentation. CoRR. arxiv:2401.04722 (2024)"},{"key":"1498_CR10","doi-asserted-by":"publisher","unstructured":"Azad, R., Arimond, R., Aghdam, E.K., Kazerouni, A., Merhof, D.: DAE-former: dual attention-guided efficient transformer for medical image segmentation. In: Rekik, I., Adeli, E., Park, S.H., Cintas, C., Zamzmi, G. (eds.) PRIME, Canada. Lecture Notes in Computer Science, vol. 14277, pp. 83\u201395. Springer (2023). https:\/\/doi.org\/10.1007\/978-3-031-46005-0_8","DOI":"10.1007\/978-3-031-46005-0_8"},{"key":"1498_CR11","doi-asserted-by":"publisher","unstructured":"Ghosal, P., Reddy, S., Sai, C., Pandey, V., Chakraborty, J., Nandi, D.: A deep adaptive convolutional network for brain tumor segmentation from multimodal MR images. In: TENCON 2019 - 2019 IEEE Region 10 Conference (TENCON), pp. 1065\u20131070 (2019). https:\/\/doi.org\/10.1109\/TENCON.2019.8929402","DOI":"10.1109\/TENCON.2019.8929402"},{"key":"1498_CR12","unstructured":"Chen, J., Lu, Y., Yu, Q., Luo, X., Adeli, E., Wang, Y., Lu, L., Yuille, A.L., Zhou, Y.: Transunet: Transformers make strong encoders for medical image segmentation. CoRR. arxiv:2102.04306 (2021)"},{"issue":"5","key":"1498_CR13","doi-asserted-by":"publisher","first-page":"1484","DOI":"10.1109\/TMI.2022.3230943","volume":"42","author":"X Huang","year":"2023","unstructured":"Huang, X., Deng, Z., Li, D., Yuan, X., Fu, Y.: Missformer: an effective transformer for 2d medical image segmentation. IEEE Trans. Med. Imaging 42(5), 1484\u20131494 (2023). https:\/\/doi.org\/10.1109\/TMI.2022.3230943","journal-title":"IEEE Trans. Med. 
Imaging"},{"key":"1498_CR14","doi-asserted-by":"publisher","first-page":"4036","DOI":"10.1109\/TIP.2023.3293771","volume":"32","author":"H Zhou","year":"2023","unstructured":"Zhou, H., Guo, J., Zhang, Y., Han, X., Yu, L., Wang, L., Yu, Y.: nnformer: Volumetric medical image segmentation via a 3d transformer. IEEE Trans. Image Process. 32, 4036\u20134045 (2023). https:\/\/doi.org\/10.1109\/TIP.2023.3293771","journal-title":"IEEE Trans. Image Process."},{"key":"1498_CR15","doi-asserted-by":"crossref","unstructured":"Lin, G., Chen, L.: A multi-scale fusion network with transformer for medical image segmentation. In: 2023 3rd International Conference on Neural Networks, Information and Communication Engineering (NNICE), pp. 224\u2013228 (2023). IEEE","DOI":"10.1109\/NNICE58320.2023.10105758"},{"key":"1498_CR16","unstructured":"Xu, L., Chen, M., Cheng, Y., Shao, P., Shen, S., Yao, P., Xu, R.X.: MCPA: multi-scale cross perceptron attention network for 2d medical image segmentation. CoRR. arxiv: 2307.14588 (2023)"},{"key":"1498_CR17","doi-asserted-by":"crossref","unstructured":"Ke, Y., Yu, S., Wang, Z., Li, Y.: ECSFF: Exploring efficient cross-scale feature fusion for medical image segmentation. In: 2023 28th International Conference on Automation and Computing (ICAC), pp. 1\u20136 (2023). IEEE","DOI":"10.1109\/ICAC57885.2023.10275282"},{"key":"1498_CR18","doi-asserted-by":"crossref","unstructured":"Wang, H., Cao, P., Wang, J., Zaiane, O.R.: Uctransnet: rethinking the skip connections in U-net from a channel-wise perspective with transformer. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, pp. 2441\u20132449 (2022)","DOI":"10.1609\/aaai.v36i3.20144"},{"key":"1498_CR19","doi-asserted-by":"publisher","unstructured":"Gu, A., Dao, T.: Mamba: Linear-time sequence modeling with selective state spaces. 
CoRR arxiv:2312.00752 (2023) https:\/\/doi.org\/10.48550\/ARXIV.2312.00752","DOI":"10.48550\/ARXIV.2312.00752"},{"key":"1498_CR20","unstructured":"Zhu, L., Liao, B., Zhang, Q., Wang, X., Liu, W., Wang, X.: Vision mamba: Efficient visual representation learning with bidirectional state space model. arXiv preprint arXiv:2401.09417 (2024)"},{"key":"1498_CR21","doi-asserted-by":"publisher","unstructured":"Liu, Y., Tian, Y., Zhao, Y., Yu, H., Xie, L., Wang, Y., Ye, Q., Liu, Y.: Vmamba: Visual state space model. CoRR. arxiv:2401.10166 (2024) https:\/\/doi.org\/10.48550\/ARXIV.2401.10166","DOI":"10.48550\/ARXIV.2401.10166"},{"key":"1498_CR22","unstructured":"Ruan, J., Xiang, S.: Vm-unet: vision mamba unet for medical image segmentation. CoRR. arxiv:2402.02491 (2024)"},{"key":"1498_CR23","doi-asserted-by":"crossref","unstructured":"Liu, J., Yang, H., Zhou, H., Xi, Y., Yu, L., Yu, Y., Liang, Y., Shi, G., Zhang, S., Zheng, H., Wang, S.: Swin-umamba: mamba-based unet with imagenet-based pretraining. CoRR. arxiv:2402.03302 (2024)","DOI":"10.1007\/978-3-031-72114-4_59"},{"key":"1498_CR24","unstructured":"Wang, L., Li, D., Dong, S., Meng, X., Zhang, X., Hong, D.: Pyramidmamba: Rethinking pyramid feature fusion with selective space state model for semantic segmentation of remote sensing imagery. arXiv preprint arXiv:2406.10828 (2024)"},{"key":"1498_CR25","doi-asserted-by":"crossref","unstructured":"Chen, K., Chen, B., Liu, C., Li, W., Zou, Z., Shi, Z.: RSMamba: remote sensing image classification with state space model. IEEE Geosci. Remote Sens. Lett. (2024)","DOI":"10.1109\/LGRS.2024.3407111"},{"key":"1498_CR26","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z.: Rethinking the inception architecture for computer vision. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 
2818\u20132826 (2016)","DOI":"10.1109\/CVPR.2016.308"},{"key":"1498_CR27","doi-asserted-by":"publisher","unstructured":"Azad, R., Heidari, M., Yilmaz, K., H\u00fcttemann, M., Karimijafarbigloo, S., Wu, Y., Schmeink, A., Merhof, D.: Loss functions in the era of semantic segmentation: A survey and outlook. CoRR. arxiv:2312.05391 (2023) https:\/\/doi.org\/10.48550\/ARXIV.2312.05391","DOI":"10.48550\/ARXIV.2312.05391"},{"issue":"11","key":"1498_CR28","doi-asserted-by":"publisher","first-page":"2514","DOI":"10.1109\/TMI.2018.2837502","volume":"37","author":"O Bernard","year":"2018","unstructured":"Bernard, O., Lalande, A., Zotti, C., Cervenansky, F., Yang, X., Heng, P.-A., Cetin, I., Lekadir, K., Camara, O., Ballester, M.A.G., et al.: Deep learning techniques for automatic MRI cardiac multi-structures segmentation and diagnosis: is the problem solved? IEEE Trans. Med. Imaging 37(11), 2514\u20132525 (2018)","journal-title":"IEEE Trans. Med. Imaging"},{"key":"1498_CR29","doi-asserted-by":"crossref","unstructured":"Fu, S., Lu, Y., Wang, Y., Zhou, Y., Shen, W., Fishman, E., Yuille, A.: Domain adaptive relational reasoning for 3d multi-organ segmentation. In: Medical Image Computing and Computer Assisted Intervention\u2013MICCAI 2020: 23rd International Conference, Lima, Peru, pp. 656\u2013666. Springer (2020)","DOI":"10.1007\/978-3-030-59710-8_64"},{"key":"1498_CR30","doi-asserted-by":"publisher","first-page":"197","DOI":"10.1016\/j.media.2019.01.012","volume":"53","author":"J Schlemper","year":"2019","unstructured":"Schlemper, J., Oktay, O., Schaap, M., Heinrich, M., Kainz, B., Glocker, B., Rueckert, D.: Attention gated networks: learning to leverage salient regions in medical images. Med. Image Anal. 53, 197\u2013207 (2019)","journal-title":"Med. 
Image Anal."},{"key":"1498_CR31","doi-asserted-by":"publisher","first-page":"42","DOI":"10.1007\/978-981-99-8543-2_4","volume-title":"Pattern Recognition and Computer Vision","author":"G Xu","year":"2024","unstructured":"Xu, G., Zhang, X., He, X., Wu, X.: LeViT-UNet: make faster encoders with Transformer for medical image segmentation. In: Liu, Q., Wang, H., Ma, Z., Zheng, W., Zha, H., Chen, X., Wang, L., Ji, R. (eds.) Pattern Recognition and Computer Vision, pp. 42\u201353. Springer, Singapore (2024)"},{"key":"1498_CR32","doi-asserted-by":"crossref","unstructured":"Cao, H., Wang, Y., Chen, J., Jiang, D., Zhang, X., Tian, Q., Wang, M.: Swin-unet: Unet-like pure transformer for medical image segmentation. In: European Conference on Computer Vision, pp. 205\u2013218. Springer (2022)","DOI":"10.1007\/978-3-031-25066-8_9"},{"key":"1498_CR33","doi-asserted-by":"publisher","first-page":"108205","DOI":"10.1109\/ACCESS.2022.3211501","volume":"10","author":"R Azad","year":"2022","unstructured":"Azad, R., Al-Antary, M.T., Heidari, M., Merhof, D.: Transnorm: transformer provides a strong spatial normalization mechanism for a deep segmentation model. IEEE Access 10, 108205\u2013108215 (2022)","journal-title":"IEEE Access"},{"key":"1498_CR34","doi-asserted-by":"crossref","unstructured":"Azad, R., Heidari, M., Shariatnia, M., Aghdam, E.K., Karimijafarbigloo, S., Adeli, E., Merhof, D.: Transdeeplab: convolution-free transformer-based Deeplab V3+ for medical image segmentation. In: International Workshop on PRedictive Intelligence In MEdicine, pp. 91\u2013102 (2022)","DOI":"10.1007\/978-3-031-16919-9_9"},{"key":"1498_CR35","doi-asserted-by":"crossref","unstructured":"Heidari, M., Kazerouni, A., Soltany, M., Azad, R., Aghdam, E.K., Cohen-Adad, J., Merhof, D.: Hiformer: hierarchical multi-scale representations using transformers for medical image segmentation. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 
6202\u20136212 (2023)","DOI":"10.1109\/WACV56688.2023.00614"},{"key":"1498_CR36","doi-asserted-by":"crossref","unstructured":"Rahman, M.M., Marculescu, R.: Medical image segmentation via cascaded attention decoding. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 6222\u20136231 (2023)","DOI":"10.1109\/WACV56688.2023.00616"},{"key":"1498_CR37","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., Houlsby, N.: An image is worth 16x16 words: transformers for image recognition at scale. In: 9th International Conference on Learning Representations (2021)"},{"key":"1498_CR38","doi-asserted-by":"crossref","unstructured":"Hatamizadeh, A., Tang, Y., Nath, V., Yang, D., Myronenko, A., Landman, B., Roth, H.R., Xu, D.: Unetr: transformers for 3d medical image segmentation. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 574\u2013584 (2022)","DOI":"10.1109\/WACV51458.2022.00181"},{"key":"1498_CR39","doi-asserted-by":"publisher","DOI":"10.1109\/TMI.2024.3398728","author":"AM Shaker","year":"2024","unstructured":"Shaker, A.M., Maaz, M., Rasheed, H., Khan, S., Yang, M.-H., Khan, F.S.: Unetr++: delving into efficient and accurate 3d medical image segmentation. IEEE Trans. Medical Imaging (2024). https:\/\/doi.org\/10.1109\/TMI.2024.3398728","journal-title":"IEEE Trans Medical Imaging"},{"key":"1498_CR40","doi-asserted-by":"publisher","unstructured":"Azad, R., Jia, Y., Aghdam, E.K., Cohen-Adad, J., Merhof, D.: Enhancing medical image segmentation with transception: a multi-scale feature fusion approach. CoRR. 
arxiv:2301.10847 (2023) https:\/\/doi.org\/10.48550\/ARXIV.2301.10847","DOI":"10.48550\/ARXIV.2301.10847"},{"key":"1498_CR41","doi-asserted-by":"crossref","unstructured":"Zhu, L., Wang, X., Ke, Z., Zhang, W., Lau, R.W.: Biformer: vision transformer with bi-level routing attention. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10323\u201310333 (2023)","DOI":"10.1109\/CVPR52729.2023.00995"},{"key":"1498_CR42","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2023.102987","volume":"91","author":"J Dai","year":"2024","unstructured":"Dai, J., Liu, T., Torigian, D.A., Tong, Y., Han, S., Nie, P., Zhang, J., Li, R., Xie, F., Udupa, J.K.: Ga-net: a geographical attention neural network for the segmentation of body torso tissue composition. Med. Image Anal. 91, 102987 (2024). https:\/\/doi.org\/10.1016\/j.media.2023.102987","journal-title":"Med. Image Anal."}],"container-title":["Multimedia Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01498-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s00530-024-01498-3\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s00530-024-01498-3.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T18:14:12Z","timestamp":1730139252000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s00530-024-01498-3"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,9,19]]},"references-count":42,"journal-issue":{"issue":"5","published-print":{"date-parts":[[2024,10]]}},"alternative-id":["1498"],"URL":"https:\/\/doi.org\/10.1007\/s00530-024-01498-3","re
lation":{},"ISSN":["0942-4962","1432-1882"],"issn-type":[{"value":"0942-4962","type":"print"},{"value":"1432-1882","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,9,19]]},"assertion":[{"value":"29 June 2024","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"7 September 2024","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"19 September 2024","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that there is no competing of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"Not applicable.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical approval"}}],"article-number":"275"}}