{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T07:47:09Z","timestamp":1758268029913,"version":"3.44.0"},"publisher-location":"Cham","reference-count":28,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783032049643","type":"print"},{"value":"9783032049650","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T00:00:00Z","timestamp":1758240000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,19]],"date-time":"2025-09-19T00:00:00Z","timestamp":1758240000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2026]]},"DOI":"10.1007\/978-3-032-04965-0_18","type":"book-chapter","created":{"date-parts":[[2025,9,18]],"date-time":"2025-09-18T08:06:30Z","timestamp":1758182790000},"page":"185-195","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Edge-Aware Token Halting for\u00a0Efficient and\u00a0Accurate Medical Image Segmentation"],"prefix":"10.1007","author":[{"given":"Yuhao","family":"Guo","sequence":"first","affiliation":[]},{"given":"Bo","family":"Song","sequence":"additional","affiliation":[]},{"given":"Heng","family":"Fan","sequence":"additional","affiliation":[]},{"given":"Erkang","family":"Cheng","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,19]]},"reference":[{"key":"18_CR1","doi-asserted-by":"crossref","unstructured":"Almalik, F., Yaqub, M., Nandakumar, K.: Self-ensembling vision transformer (SEViT) for robust medical image classification. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 376\u2013386. Springer (2022)","DOI":"10.1007\/978-3-031-16437-8_36"},{"issue":"1","key":"18_CR2","doi-asserted-by":"publisher","first-page":"4128","DOI":"10.1038\/s41467-022-30695-9","volume":"13","author":"M Antonelli","year":"2022","unstructured":"Antonelli, M., Reinke, A., Bakas, S., Farahani, K., Kopp-Schneider, A., Landman, B.A., Litjens, G., Menze, B., Ronneberger, O., Summers, R.M., et al.: The medical segmentation decathlon. Nat. Commun. 13(1), 4128 (2022)","journal-title":"Nat. Commun."},{"key":"18_CR3","unstructured":"Bengio, Y., L\u00e9onard, N., Courville, A.: Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432 (2013)"},{"key":"18_CR4","unstructured":"Bolya, D., Fu, C.Y., Dai, X., Zhang, P., Feichtenhofer, C., Hoffman, J.: Token merging: your ViT but faster. In: International Conference on Learning Representations (2023)"},{"key":"18_CR5","doi-asserted-by":"crossref","unstructured":"Cao, H., et al.: Swin-Unet: Unet-like pure transformer for medical image segmentation. In: European Conference on Computer Vision, pp. 205\u2013218. Springer (2022)","DOI":"10.1007\/978-3-031-25066-8_9"},{"key":"18_CR6","unstructured":"Chen, J., et al.: TransUNet: Transformers make strong encoders for medical image segmentation. arXiv preprint arXiv:2102.04306 (2021)"},{"key":"18_CR7","unstructured":"Dosovitskiy, A., et\u00a0al.: An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"18_CR8","doi-asserted-by":"crossref","unstructured":"Du, S., Bayasi, N., Hamarneh, G., Garbi, R.: Mdvit: multi-domain vision transformer for small medical image segmentation datasets. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 448\u2013458. Springer (2023)","DOI":"10.1007\/978-3-031-43901-8_43"},{"key":"18_CR9","doi-asserted-by":"publisher","unstructured":"Fu, S., et al.: Domain adaptive relational reasoning for 3D multi-organ segmentation. In: Martel, A.L., Abolmaesumi, P., Stoyanov, D., Mateus, D., Zuluaga, M.A., Zhou, S.K., Racoceanu, D., Joskowicz, L. (eds.) MICCAI 2020. LNCS, vol. 12261, pp. 656\u2013666. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-59710-8_64","DOI":"10.1007\/978-3-030-59710-8_64"},{"key":"18_CR10","doi-asserted-by":"crossref","unstructured":"Hatamizadeh, A., et al.: UNETR: transformers for 3D medical image segmentation. In: Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision, pp. 574\u2013584 (2022)","DOI":"10.1109\/WACV51458.2022.00181"},{"key":"18_CR11","doi-asserted-by":"crossref","unstructured":"Isensee, F., et al.: nnU-Net Revisited: a call for rigorous validation in 3d medical image segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 488\u2013498. Springer (2024)","DOI":"10.1007\/978-3-031-72114-4_47"},{"key":"18_CR12","unstructured":"Landman, B., Xu, Z., Igelsias, J., Styner, M., Langerak, T., Klein, A.: MICCAI multi-atlas labeling beyond the cranial vault\u2013workshop and challenge. In: Proceedings of MICCAI Multi-Atlas Labeling Beyond Cranial Vault\u2014Workshop Challenge. vol.\u00a05, p.\u00a012 (2015)"},{"key":"18_CR13","first-page":"35462","volume":"35","author":"W Liang","year":"2022","unstructured":"Liang, W., et al.: Expediting large-scale vision transformer for dense prediction without fine-tuning. Adv. Neural. Inf. Process. Syst. 35, 35462\u201335477 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"18_CR14","unstructured":"Liang, Y., Ge, C., Tong, Z., Song, Y., Wang, J., Xie, P.: Not all patches are what you need: Expediting vision transformers via token reorganizations. In: International Conference on Learning Representations (2022). https:\/\/openreview.net\/forum?id=BjyvwnXXVn_"},{"key":"18_CR15","doi-asserted-by":"crossref","unstructured":"Lu, C., de\u00a0Geus, D., Dubbelman, G.: Content-aware token sharing for efficient semantic segmentation with vision transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 23631\u201323640 (2023)","DOI":"10.1109\/CVPR52729.2023.02263"},{"key":"18_CR16","doi-asserted-by":"crossref","unstructured":"Lyu, P., Zhang, J., Zhang, L., Liu, W., Wang, C., Zhu, J.: MetaUNETR: rethinking token mixer encoding for efficient multi-organ segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 446\u2013455. Springer (2024)","DOI":"10.1007\/978-3-031-72114-4_43"},{"key":"18_CR17","doi-asserted-by":"crossref","unstructured":"Milletari, F., Navab, N., Ahmadi, S.A.: V-Net: fully convolutional neural networks for volumetric medical image segmentation. In: 2016 Fourth International Conference on 3D Vision (3DV), pp. 565\u2013571. IEEE (2016)","DOI":"10.1109\/3DV.2016.79"},{"key":"18_CR18","doi-asserted-by":"crossref","unstructured":"Peiris, H., Hayat, M., Chen, Z., Egan, G., Harandi, M.: A robust volumetric transformer for accurate 3D tumor segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 162\u2013172. Springer (2022)","DOI":"10.1007\/978-3-031-16443-9_16"},{"key":"18_CR19","first-page":"13937","volume":"34","author":"Y Rao","year":"2021","unstructured":"Rao, Y., Zhao, W., Liu, B., Lu, J., Zhou, J., Hsieh, C.J.: DynamicViT: efficient vision transformers with dynamic token Sparsification. Adv. Neural. Inf. Process. Syst. 34, 13937\u201313949 (2021)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"18_CR20","doi-asserted-by":"publisher","unstructured":"Ronneberger, O., Fischer, P., Brox, T.: U-Net: convolutional networks for biomedical image segmentation. In: Navab, N., Hornegger, J., Wells, W.M., Frangi, A.F. (eds.) MICCAI 2015. LNCS, vol. 9351, pp. 234\u2013241. Springer, Cham (2015). https:\/\/doi.org\/10.1007\/978-3-319-24574-4_28","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"18_CR21","doi-asserted-by":"crossref","unstructured":"She, D., Zhang, Y., Zhang, Z., Li, H., Yan, Z., Sun, X.: EoFormer: edge-oriented transformer for brain tumor segmentation. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 333\u2013343. Springer (2023)","DOI":"10.1007\/978-3-031-43901-8_32"},{"key":"18_CR22","doi-asserted-by":"crossref","unstructured":"Tang, Q., Zhang, B., Liu, J., Liu, F., Liu, Y.: Dynamic token pruning in plain vision transformers for semantic segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 777\u2013786 (2023)","DOI":"10.1109\/ICCV51070.2023.00078"},{"key":"18_CR23","unstructured":"Vaswani, A., et al.: Attention is all you need. Adv. Neural Inf. Process. Syst. 30 (2017)"},{"key":"18_CR24","doi-asserted-by":"crossref","unstructured":"Ye, M., Meyer, G.P., Chai, Y., Liu, Q.: Efficient transformer-based 3D object detection with dynamic token halting. arXiv preprint arXiv:2303.05078 (2023)","DOI":"10.1109\/ICCV51070.2023.00775"},{"key":"18_CR25","doi-asserted-by":"crossref","unstructured":"Yin, H., Vahdat, A., Alvarez, J.M., Mallya, A., Kautz, J., Molchanov, P.: A-ViT: adaptive tokens for efficient vision transformer. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 10809\u201310818 (2022)","DOI":"10.1109\/CVPR52688.2022.01054"},{"key":"18_CR26","first-page":"4971","volume":"35","author":"B Zhang","year":"2022","unstructured":"Zhang, B., Tian, Z., Tang, Q., Chu, X., Wei, X., Shen, C., et al.: SegViT: semantic segmentation with plain vision transformers. Adv. Neural. Inf. Process. Syst. 35, 4971\u20134982 (2022)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"18_CR27","doi-asserted-by":"crossref","unstructured":"Zhou, L., Liu, H., Bae, J., He, J., Samaras, D., Prasanna, P.: Token sparsification for faster medical image segmentation. In: International Conference on Information Processing in Medical Imaging, pp. 743\u2013754. Springer (2023)","DOI":"10.1007\/978-3-031-34048-2_57"},{"key":"18_CR28","unstructured":"Zhu, X., Su, W., Lu, L., Li, B., Wang, X., Dai, J.: Deformable DETR: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159 (2020)"}],"container-title":["Lecture Notes in Computer Science","Medical Image Computing and Computer Assisted Intervention \u2013 MICCAI 2025"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-032-04965-0_18","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,18]],"date-time":"2025-09-18T22:06:27Z","timestamp":1758233187000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-032-04965-0_18"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,19]]},"ISBN":["9783032049643","9783032049650"],"references-count":28,"URL":"https:\/\/doi.org\/10.1007\/978-3-032-04965-0_18","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,19]]},"assertion":[{"value":"19 September 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"MICCAI","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"International Conference on Medical Image Computing and Computer-Assisted Intervention","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Daejeon","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Korea (Republic of)","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2025","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 September 2025","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 September 2025","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"28","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"miccai2025","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/conferences.miccai.org\/2025\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}