{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,9,16]],"date-time":"2025-09-16T16:37:55Z","timestamp":1758040675067,"version":"3.44.0"},"publisher-location":"Cham","reference-count":19,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031818530"},{"type":"electronic","value":"9783031818547"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-81854-7_15","type":"book-chapter","created":{"date-parts":[[2025,2,18]],"date-time":"2025-02-18T16:08:50Z","timestamp":1739894930000},"page":"218-231","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Modality-Specific Strategies for\u00a0Medical Image Segmentation Using Lightweight SAM Architectures"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-0715-3555","authenticated-orcid":false,"given":"Thuy","family":"Dao","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2544-835X","authenticated-orcid":false,"given":"Xincheng","family":"Ye","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-0071-5466","authenticated-orcid":false,"given":"Joshua","family":"Scarsbrook","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0002-3563-2887","authenticated-orcid":false,"given":"Gowrienanthan","family":"Balarupan","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-1620-4193","authenticated-orcid":false,"given":"Fernanda L.","family":"Ribeiro","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-2909-0906","authenticated-orcid":false,"given":"Steffen","family":"Bollmann","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,19]]},"reference":[{"key":"15_CR1","unstructured":"Bommasani, R., et al.: On the opportunities and risks of foundation models (2022). http:\/\/arxiv.org\/abs\/2108.07258, arXiv:2108.07258 [cs]"},{"key":"15_CR2","unstructured":"Deng, R., et al.: Segment anything model (SAM) for digital pathology: assess zero-shot segmentation on whole slide imaging. In: Medical Imaging with Deep Learning, Short Paper Track (2023). https:\/\/openreview.net\/forum?id=lUZGyTRzxq"},{"key":"15_CR3","unstructured":"Foret, P., Kleiner, A., Mobahi, H., Neyshabur, B.: Sharpness-aware minimization for efficiently improving generalization. In: International Conference on Learning Representations (2021). https:\/\/openreview.net\/forum?id=6Tm1mposlrM"},{"key":"15_CR4","doi-asserted-by":"publisher","unstructured":"Huang, Y., et al.: Segment anything model for medical images? Med. Image Anal. 92, 103061 (2024). https:\/\/doi.org\/10.1016\/j.media.2023.103061","DOI":"10.1016\/j.media.2023.103061"},{"key":"15_CR5","doi-asserted-by":"publisher","unstructured":"Kirillov, A., et al.: Segment anything (2023). https:\/\/doi.org\/10.48550\/arXiv.2304.02643, http:\/\/arxiv.org\/abs\/2304.02643, arXiv:2304.02643 [cs]","DOI":"10.48550\/arXiv.2304.02643"},{"key":"15_CR6","unstructured":"Le, B.H., Nguyen-Vu, D.K., Nguyen-Mau, T.H., Nguyen, H.D., Tran, M.T.: MedficientSAM: a robust medical segmentation model with optimized inference pipeline for limited clinical settings. In: Submitted to CVPR 2024: Segment Anything In Medical Images On Laptop (2024). https:\/\/openreview.net\/forum?id=aa0f77RKI0, under review"},{"key":"15_CR7","doi-asserted-by":"crossref","unstructured":"Liu, X., Peng, H., Zheng, N., Yang, Y., Hu, H., Yuan, Y.: EfficientViT: memory efficient vision transformer with cascaded group attention. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 14420\u201314430 (2023). https:\/\/arxiv.org\/abs\/2305.07027","DOI":"10.1109\/CVPR52729.2023.01386"},{"key":"15_CR8","unstructured":"Liu, Z., et al.: Swin transformer: hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 10012\u201310022 (2021). http:\/\/arxiv.org\/abs\/2103.14030"},{"key":"15_CR9","doi-asserted-by":"publisher","unstructured":"Liyanage, H., et al.: Artificial intelligence in primary health care: perceptions, issues, and challenges: primary health care informatics working group contribution to the yearbook of medical informatics 2019. Yearbook Med. Inform. 28(01), 041\u2013046 (2019). https:\/\/doi.org\/10.1055\/s-0039-1677901, http:\/\/www.thieme-connect.de\/DOI\/DOI?10.1055\/s-0039-1677901","DOI":"10.1055\/s-0039-1677901"},{"key":"15_CR10","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization (2019). http:\/\/arxiv.org\/abs\/1711.05101, arXiv:1711.05101 [cs, math]"},{"key":"15_CR11","doi-asserted-by":"publisher","unstructured":"Ma, J., He, Y., Li, F., Han, L., You, C., Wang, B.: Segment anything in medical images. Nat. Commun. 15(1), 654 (2024). https:\/\/doi.org\/10.1038\/s41467-024-44824-z, https:\/\/www.nature.com\/articles\/s41467-024-44824-z","DOI":"10.1038\/s41467-024-44824-z"},{"key":"15_CR12","doi-asserted-by":"publisher","unstructured":"Marin\u00f3, G.C., Petrini, A., Malchiodi, D., Frasca, M.: Deep neural networks compression: a comparative survey and choice recommendations. Neurocomputing 520, 152\u2013170 (2023). https:\/\/doi.org\/10.1016\/j.neucom.2022.11.072, https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0925231222014643","DOI":"10.1016\/j.neucom.2022.11.072"},{"key":"15_CR13","doi-asserted-by":"publisher","unstructured":"Mazurowski, M.A., Dong, H., Gu, H., Yang, J., Konz, N., Zhang, Y.: Segment anything model for medical image analysis: an experimental study. Med. Image Anal. 89, 102918 (2023). https:\/\/doi.org\/10.1016\/j.media.2023.102918","DOI":"10.1016\/j.media.2023.102918"},{"key":"15_CR14","unstructured":"Roy, S., et al.: SAM.MD: zero-shot medical image segmentation capabilities of the Segment Anything Model (2023). http:\/\/arxiv.org\/abs\/2304.05396, arXiv:2304.05396 [cs, eess]"},{"key":"15_CR15","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"68","DOI":"10.1007\/978-3-031-19803-8_5","volume-title":"Computer Vision - ECCV 2022","author":"K Wu","year":"2022","unstructured":"Wu, K., et al.: TinyViT: fast pretraining distillation for small vision transformers. In: Avidan, S., Brostow, G., Ciss\u00e9, M., Farinella, G.M., Hassner, T. (eds.) ECCV 2022. LNCS, vol. 13681, pp. 68\u201385. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-19803-8_5"},{"key":"15_CR16","unstructured":"Xiong, Y., et al.: EfficientSAM: leveraged masked image pretraining for efficient segment anything (2023). http:\/\/arxiv.org\/abs\/2312.00863, arXiv:2312.00863 [cs]"},{"issue":"7","key":"15_CR17","doi-asserted-by":"publisher","DOI":"10.1016\/j.patter.2022.100543","volume":"3","author":"Z Xu","year":"2022","unstructured":"Xu, Z., et al.: Codabench: flexible, easy-to-use, and reproducible meta-benchmark platform. Patterns 3(7), 100543 (2022)","journal-title":"Patterns"},{"key":"15_CR18","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Cai, H., Han, S.: EfficientViT-SAM: accelerated segment anything model without accuracy loss (2024). http:\/\/arxiv.org\/abs\/2402.05008, arXiv:2402.05008 [cs]","DOI":"10.1109\/CVPRW63382.2024.00782"},{"key":"15_CR19","doi-asserted-by":"publisher","unstructured":"Zhou, T., Ruan, S., Canu, S.: A review: deep learning for medical image segmentation using multi-modality fusion. Array 3-4, 100004 (2019). https:\/\/doi.org\/10.1016\/j.array.2019.100004, https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S2590005619300049","DOI":"10.1016\/j.array.2019.100004"}],"container-title":["Lecture Notes in Computer Science","Medical Image Segmentation Foundation Models. CVPR 2024 Challenge: Segment Anything in Medical Images on Laptop"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-81854-7_15","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T09:24:05Z","timestamp":1757582645000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-81854-7_15"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031818530","9783031818547"],"references-count":19,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-81854-7_15","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"19 February 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"MedSAM on Laptop","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Medical Image Segmentation Challenge","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Seattle, WA","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 June 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 June 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"medsam2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.codabench.org\/competitions\/1847\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}