{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,1,16]],"date-time":"2026-01-16T10:58:03Z","timestamp":1768561083333,"version":"3.49.0"},"publisher-location":"Cham","reference-count":34,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031721199","type":"print"},{"value":"9783031721205","type":"electronic"}],"license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2024]]},"DOI":"10.1007\/978-3-031-72120-5_52","type":"book-chapter","created":{"date-parts":[[2024,10,2]],"date-time":"2024-10-02T12:02:53Z","timestamp":1727870573000},"page":"555-566","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":4,"title":["Revisiting Self-attention in\u00a0Medical Transformers via\u00a0Dependency Sparsification"],"prefix":"10.1007","author":[{"given":"Xian","family":"Lin","sequence":"first","affiliation":[]},{"given":"Zhehao","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Zengqiang","family":"Yan","sequence":"additional","affiliation":[]},{"given":"Li","family":"Yu","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,10,3]]},"reference":[{"issue":"1","key":"52_CR1","doi-asserted-by":"publisher","first-page":"87","DOI":"10.1109\/TPAMI.2022.3152247","volume":"45","author":"K Han","year":"2022","unstructured":"Han, K., et al.: A survey on vision 
transformer. IEEE Trans. Pattern Anal. Mach. Intell. 45(1), 87\u2013110 (2022)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"52_CR2","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"52_CR3","unstructured":"Vaswani, A., et al.: Attention is all you need. arXiv preprint arXiv:1706.03762 (2017)"},{"key":"52_CR4","doi-asserted-by":"crossref","unstructured":"Zheng, S., et al.: Rethinking semantic segmentation from a sequence-to-sequence perspective with transformers. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 6881\u20136890 (2021)","DOI":"10.1109\/CVPR46437.2021.00681"},{"key":"52_CR5","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2023.102762","volume":"85","author":"J Li","year":"2023","unstructured":"Li, J., et al.: Transforming medical imaging with Transformers? A comparative review of key properties, current progresses, and future perspectives. Med. Image Anal. 85, 102762 (2023)","journal-title":"Med. Image Anal."},{"key":"52_CR6","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2023.102802","volume":"88","author":"F Shamshad","year":"2023","unstructured":"Shamshad, F., et al.: Transformers in medical imaging: A survey. Med. Image Anal. 88, 102802 (2023)","journal-title":"Med. Image Anal."},{"key":"52_CR7","unstructured":"Wang, P., et al.: Going deeper with image transformers. In: European Conference on Computer Vision, pp. 285\u2013302 (2022)"},{"key":"52_CR8","doi-asserted-by":"crossref","unstructured":"Xia, Z., Pan, X., Song, S., Li, L. E., Huang, G.: Vision transformer with deformable attention. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4794\u20134803 (2022)","DOI":"10.1109\/CVPR52688.2022.00475"},{"key":"52_CR9","doi-asserted-by":"crossref","unstructured":"Cao, H., et al. 
Swin-unet: Unet-like pure transformer for medical image segmentation. In: European Conference on Computer Vision, pp. 205\u2013218 (2022)","DOI":"10.1007\/978-3-031-25066-8_9"},{"issue":"5","key":"52_CR10","doi-asserted-by":"publisher","first-page":"1484","DOI":"10.1109\/TMI.2022.3230943","volume":"42","author":"X Huang","year":"2022","unstructured":"Huang, X., Deng, Z., Li, D., Yuan, X., Fu, Y.: MISSFormer: An effective transformer for 2d medical image segmentation. IEEE Trans. Med. Imag. 42(5), 1484\u20131494 (2022)","journal-title":"IEEE Trans. Med. Imag."},{"key":"52_CR11","doi-asserted-by":"publisher","unstructured":"Ou, Y., et al.: Patcher: Patch transformers with mixture of experts for precise medical image segmentation. In: Wang, Li., Dou, Q., Fletcher, P.T., Speidel S., Li, S. (eds.) MICCAI 2022, LNCS, vol. 13431, pp. 475\u2013484. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-16443-9_46","DOI":"10.1007\/978-3-031-16443-9_46"},{"key":"52_CR12","unstructured":"Landman, B., Xu, Z., Igelsias, J., Styner, M., Langerak, T., Klein, A.: Miccai multi-atlas labeling beyond the cranial vault-workshop and challenge. In: Proc. MICCAI Multi-Atlas Labeling Beyond Cranial Vault-Workshop Challenge, pp. 12 (2015)"},{"key":"52_CR13","doi-asserted-by":"crossref","unstructured":"Ren, S., Zhou, D., He, S., Feng, J., Wang, X.: Shunted self-attention via multi-scale token aggregation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10853\u201310862 (2022)","DOI":"10.1109\/CVPR52688.2022.01058"},{"key":"52_CR14","doi-asserted-by":"crossref","unstructured":"Wang, W., et al.: Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 
568\u2013578 (2021)","DOI":"10.1109\/ICCV48922.2021.00061"},{"key":"52_CR15","unstructured":"Chu, X., et al.: Twins: Revisiting the design of spatial attention in vision transformers. Advances in Neural Information Processing Systems, pp. 9355\u20139366 (2021)"},{"key":"52_CR16","unstructured":"Ho, J., Kalchbrenner, N., Weissenborn, D., Salimans, T.: Axial attention in multidimensional transformers. arXiv preprint arXiv:1912.12180 (2019)"},{"key":"52_CR17","doi-asserted-by":"crossref","unstructured":"Liu, Z., et al.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021)","DOI":"10.1109\/ICCV48922.2021.00986"},{"key":"52_CR18","doi-asserted-by":"crossref","unstructured":"Zhu, L., Wang, X., Ke, Z., Zhang, W., Lau, R. W.: BiFormer: Vision Transformer with Bi-Level Routing Attention. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10323\u201310333 (2023)","DOI":"10.1109\/CVPR52729.2023.00995"},{"key":"52_CR19","unstructured":"Huang, H., Zhou, X., Cao, J., He, R., Tan, T.: Vision Transformer with Super Token Sampling. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10323\u201310333 (2023)"},{"key":"52_CR20","doi-asserted-by":"crossref","unstructured":"Grainger, R., Paniagua, T., Song, X., Cuntoor, N., Lee, M. W., Wu, T.: PaCa-ViT: Learning Patch-to-Cluster Attention in Vision Transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 22690\u201322699 (2023)","DOI":"10.1109\/CVPR52729.2023.01781"},{"key":"52_CR21","doi-asserted-by":"publisher","unstructured":"Zhang, Y., Liu, H., Hu, Q.: Transfuse: Fusing transformers and cnns for medical image segmentation. In: de Bruijne, M., et al. (eds.) MICCAI 2021, LNCS, vol. 12901, pp. 14\u201324. Springer, Cham (2021). 
https:\/\/doi.org\/10.1007\/978-3-030-87193-2_2","DOI":"10.1007\/978-3-030-87193-2_2"},{"key":"52_CR22","doi-asserted-by":"crossref","unstructured":"Wu, H., Chen, S., Chen, G., Wang, W., Lei, B., Wen, Z.: FAT-Net: Feature adaptive transformers for automated skin lesion segmentation. Med. Image Anal. 76, 102327 (2022)","DOI":"10.1016\/j.media.2021.102327"},{"key":"52_CR23","doi-asserted-by":"publisher","unstructured":"Valanarasu, J. M., Oza, P., Hacihaliloglu, I., Patel, V. M.: Medical transformer: Gated axial-attention for medical image segmentation. In: de Bruijne, M., et al. (eds.) MICCAI 2021, LNCS, vol. 12901, pp. 36\u201346. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87193-2_4","DOI":"10.1007\/978-3-030-87193-2_4"},{"key":"52_CR24","unstructured":"Bernard, O., et al.: Deep learning techniques for automatic MRI cardiac multi-structures segmentation and diagnosis: is the problem solved? IEEE Trans. Med. Imag. 37(11), 2514\u20132525 (2018) multi-source dermatoscopic images of common pigmented skin lesions. Sci. Data. 5(1), 1\u20139 (2018)"},{"key":"52_CR25","unstructured":"Li, X., et al.: The state-of-the-art 3D anisotropic intracranial hemorrhage segmentation on non-contrast head CT: The INSTANCE challenge. arXiv preprint arXiv:2301.03281 (2023)"},{"key":"52_CR26","doi-asserted-by":"publisher","first-page":"4036","DOI":"10.1109\/TIP.2023.3293771","volume":"42","author":"HY Zhou","year":"2023","unstructured":"Zhou, H. Y., et al.: nnFormer: Volumetric medical image segmentation via a 3D transformer. IEEE Trans. Image Process. 42, 4036\u20134045 (2023)","journal-title":"IEEE Trans. Image Process."},{"key":"52_CR27","doi-asserted-by":"publisher","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-net: Convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W. M., Frangi, A.F. (eds.) MICCAI 2015, LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). 
https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28","DOI":"10.1007\/978-3-319-24574-4_28"},{"issue":"2","key":"52_CR28","doi-asserted-by":"publisher","first-page":"699","DOI":"10.1109\/TMI.2020.3035253","volume":"40","author":"R Gu","year":"2020","unstructured":"Gu, R., et al.: CA-Net: Comprehensive attention convolutional neural networks for explainable medical image segmentation. IEEE Trans. Med. Imag. 40(2), 699\u2013711 (2020)","journal-title":"IEEE Trans. Med. Imag."},{"issue":"5","key":"52_CR29","doi-asserted-by":"publisher","first-page":"1289","DOI":"10.1109\/TMI.2022.3226268","volume":"42","author":"G Chen","year":"2023","unstructured":"Chen, G., Li, L., Dai, Y., Zhang, J., Yap, M. H.: AAU-net: an adaptive attention U-net for breast lesions segmentation in ultrasound images. IEEE Trans. Med. Imag. 42(5), 1289\u20131300 (2023)","journal-title":"IEEE Trans. Med. Imag."},{"issue":"2","key":"52_CR30","doi-asserted-by":"publisher","first-page":"203","DOI":"10.1038\/s41592-020-01008-z","volume":"18","author":"F Isensee","year":"2021","unstructured":"Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., Maier-Hein, K. H.: nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nat. Methods. 18(2), 203\u2013211 (2021)","journal-title":"Nat. Methods."},{"key":"52_CR31","unstructured":"Chen, J., et al. Transunet: Transformers make strong encoders for medical image segmentation. arXiv preprint arXiv:2102.04306 (2021)"},{"issue":"9","key":"52_CR32","doi-asserted-by":"publisher","first-page":"2763","DOI":"10.1109\/TMI.2023.3264513","volume":"42","author":"A He","year":"2023","unstructured":"He, A., Wang, K., Li, T., Du, C., Xia, S., Fu, H.: H2former: An efficient hierarchical hybrid transformer for medical image segmentation. IEEE Trans. Med. Imag. 42(9), 2763\u20132775 (2023)","journal-title":"IEEE Trans. Med. 
Imag."},{"key":"52_CR33","doi-asserted-by":"publisher","unstructured":"Roy, S., et al.: Mednext: transformer-driven scaling of convnets for medical image segmentation.. In: Greenspan, H., et al. (eds.) MICCAI 2023, LNCS, vol. 14223, pp. 405\u2013415. Springer, Cham (2023). https:\/\/doi.org\/10.1007\/978-3-031-43901-8_39","DOI":"10.1007\/978-3-031-43901-8_39"},{"key":"52_CR34","doi-asserted-by":"publisher","unstructured":"Wang, W., Chen, C., Ding, M., Yu, H., Zha, S., Li, J.: TransBTS: Multimodal brain tumor segmentation using transformer. In: de Bruijne, M., et al. (eds.) MICCAI 2021, LNCS, vol. 12901, pp. 109\u2013119. Springer, Cham (2021). https:\/\/doi.org\/10.1007\/978-3-030-87193-2_11","DOI":"10.1007\/978-3-030-87193-2_11"}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2024"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-72120-5_52","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,2]],"date-time":"2024-10-02T12:28:07Z","timestamp":1727872087000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-72120-5_52"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"ISBN":["9783031721199","9783031721205"],"references-count":34,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-72120-5_52","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024]]},"assertion":[{"value":"3 October 2024","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this 
article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Marrakesh","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Morocco","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"7 October 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"11 October 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conferences.miccai.org\/2024\/en\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}