{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,22]],"date-time":"2026-04-22T20:21:45Z","timestamp":1776889305808,"version":"3.51.2"},"publisher-location":"Cham","reference-count":27,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031818530","type":"print"},{"value":"9783031818547","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-81854-7_1","type":"book-chapter","created":{"date-parts":[[2025,2,18]],"date-time":"2025-02-18T16:08:29Z","timestamp":1739894909000},"page":"1-14","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["MedficientSAM: A Robust Medical Segmentation Model with\u00a0Optimized Inference Pipeline for\u00a0Limited Clinical Settings"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0002-3673-2970","authenticated-orcid":false,"given":"Bao-Hiep","family":"Le","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-9469-7852","authenticated-orcid":false,"given":"Dang-Khoa","family":"Nguyen-Vu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-2823-3861","authenticated-orcid":false,"given":"Trong-Hieu","family":"Nguyen-Mau","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-0888-8908","authenticated-orcid":false,"given":"Hai-Dang","family":"Nguyen","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0003-3046-3041","authenticated-orcid":false,"given":"Minh-Triet","family":"Tran","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,19]]},"reference":[{"key":"1_CR1","unstructured":"Bradski, G.: The OpenCV library. Dr. Dobb\u2019s J. Softw. Tools (2000)"},{"key":"1_CR2","unstructured":"Cai, H., Li, J., Hu, M., Gan, C., Han, S.: Efficientvit: multi-scale linear attention for high-resolution dense prediction (2024). https:\/\/arxiv.org\/abs\/2205.14756"},{"key":"1_CR3","doi-asserted-by":"crossref","unstructured":"Chen, C., et al.: MA-SAM: modality-agnostic SAM adaptation for 3D medical image segmentation (2023). https:\/\/arxiv.org\/abs\/2309.08842","DOI":"10.1016\/j.media.2024.103310"},{"key":"1_CR4","doi-asserted-by":"crossref","unstructured":"Feichtenhofer, C.: X3D: expanding architectures for efficient video recognition (2020)","DOI":"10.1109\/CVPR42600.2020.00028"},{"key":"1_CR5","doi-asserted-by":"publisher","unstructured":"Gong, S., et al.: 3DSAM-adapter: holistic adaptation of SAM from 2D to 3D for promptable tumor segmentation. Med. Image Anal. 98, 103324 (2024). https:\/\/doi.org\/10.1016\/j.media.2024.103324","DOI":"10.1016\/j.media.2024.103324"},{"key":"1_CR6","doi-asserted-by":"publisher","unstructured":"He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. 
In: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770\u2013778 (2016). https:\/\/doi.org\/10.1109\/CVPR.2016.90","DOI":"10.1109\/CVPR.2016.90"},{"key":"1_CR7","doi-asserted-by":"publisher","unstructured":"Isensee, F., Jaeger, P.F., Kohl, S.A.A., Petersen, J., Maier-Hein, K.H.: nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nat. Methods 18(2), 203\u2013211 (2021). https:\/\/doi.org\/10.1038\/s41592-020-01008-z","DOI":"10.1038\/s41592-020-01008-z"},{"key":"1_CR8","unstructured":"Katharopoulos, A., Vyas, A., Pappas, N., Fleuret, F.: Transformers are RNNs: fast autoregressive transformers with linear attention (2020)"},{"key":"1_CR9","doi-asserted-by":"crossref","unstructured":"Kirillov, A., et al.: Segment anything. arXiv:2304.02643 (2023)","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"1_CR10","doi-asserted-by":"crossref","unstructured":"Li, H., Liu, H., Hu, D., Wang, J., Oguz, I.: Promise: prompt-driven 3D medical image segmentation using pretrained image foundation models (2023). https:\/\/arxiv.org\/abs\/2310.19721","DOI":"10.1109\/ISBI56570.2024.10635207"},{"key":"1_CR11","doi-asserted-by":"publisher","unstructured":"Lin, T.Y., Goyal, P., Girshick, R., He, K., Doll\u00e1r, P.: Focal loss for dense object detection. In: 2017 IEEE International Conference on Computer Vision (ICCV), pp. 2999\u20133007 (2017). https:\/\/doi.org\/10.1109\/ICCV.2017.324","DOI":"10.1109\/ICCV.2017.324"},{"key":"1_CR12","unstructured":"Loshchilov, I., Hutter, F.: SGDR: stochastic gradient descent with warm restarts. In: International Conference on Learning Representations (2017). https:\/\/openreview.net\/forum?id=Skq89Scxx"},{"key":"1_CR13","unstructured":"Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: International Conference on Learning Representations (2019). https:\/\/openreview.net\/forum?id=Bkg6RiCqY7"},{"key":"1_CR14","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2021.102035","volume":"71","author":"J Ma","year":"2021","unstructured":"Ma, J., et al.: Loss odyssey in medical image segmentation. Med. Image Anal. 71, 102035 (2021)","journal-title":"Med. Image Anal."},{"issue":"1","key":"1_CR15","doi-asserted-by":"publisher","first-page":"654","DOI":"10.1038\/s41467-024-44824-z","volume":"15","author":"J Ma","year":"2024","unstructured":"Ma, J., He, Y., Li, F., Han, L., You, C., Wang, B.: Segment anything in medical images. Nat. Commun. 15(1), 654 (2024)","journal-title":"Nat. Commun."},{"key":"1_CR16","unstructured":"Mabille, J., Corlay, S., Vollprecht, W.: xtensor: multi-dimensional arrays with broadcasting and lazy computing (2016). https:\/\/github.com\/xtensor-stack\/xtensor"},{"issue":"239","key":"1_CR17","first-page":"2","volume":"2014","author":"D Merkel","year":"2014","unstructured":"Merkel, D.: Docker: lightweight linux containers for consistent development and deployment. Linux J. 2014(239), 2 (2014)","journal-title":"Linux J."},{"key":"1_CR18","doi-asserted-by":"publisher","unstructured":"Milletari, F., Navab, N., Ahmadi, S.A.: V-net: fully convolutional neural networks for volumetric medical image segmentation. In: 2016 Fourth International Conference on 3D Vision (3DV), pp. 565\u2013571 (2016). 
https:\/\/doi.org\/10.1109\/3DV.2016.79","DOI":"10.1109\/3DV.2016.79"},{"key":"1_CR19","doi-asserted-by":"publisher","unstructured":"Pham, M., Nguyen-Ho, T., Dao, T.T.P., Nguyen, T., Tran, M.: Semi-supervised organ segmentation with mask propagation refinement and uncertainty estimation for data generation. In: Ma, J., Wang, B. (eds.) Fast and Low-Resource Semi-supervised Abdominal Organ Segmentation - MICCAI 2022 Challenge, FLARE 2022, Held in Conjunction with MICCAI 2022, Singapore, September 22, 2022, Proceedings. Lecture Notes in Computer Science, vol. 13816, pp. 163\u2013177. Springer, Cham (2022). https:\/\/doi.org\/10.1007\/978-3-031-23911-3_15","DOI":"10.1007\/978-3-031-23911-3_15"},{"key":"1_CR20","unstructured":"Tan, M., Le, Q.: Efficientnetv2: smaller models and faster training. In: Meila, M., Zhang, T. (eds.) Proceedings of the 38th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol.\u00a0139, pp. 10096\u201310106. PMLR (2021). https:\/\/proceedings.mlr.press\/v139\/tan21a.html"},{"key":"1_CR21","unstructured":"Vaswani, A., et al.: Attention is all you need (2023)"},{"key":"1_CR22","unstructured":"Wu, J., et al.: Medical SAM adapter: adapting segment anything model for medical image segmentation (2023)"},{"key":"1_CR23","doi-asserted-by":"crossref","unstructured":"Wu, K., et al.: Tinyvit: fast pretraining distillation for small vision transformers. In: European Conference on Computer Vision (ECCV) (2022)","DOI":"10.1007\/978-3-031-19803-8_5"},{"issue":"7","key":"1_CR24","doi-asserted-by":"publisher","DOI":"10.1016\/j.patter.2022.100543","volume":"3","author":"Z Xu","year":"2022","unstructured":"Xu, Z., et al.: Codabench: flexible, easy-to-use, and reproducible meta-benchmark platform. Patterns 3(7), 100543 (2022)","journal-title":"Patterns"},{"key":"1_CR25","unstructured":"Ye, J., et al.: SA-Med2D-20M dataset: segment anything in 2D medical imaging with 20 million masks (2023)"},{"key":"1_CR26","unstructured":"Zhang, C., et al.: Faster segment anything: towards lightweight SAM for mobile applications (2023). https:\/\/arxiv.org\/abs\/2306.14289"},{"key":"1_CR27","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Cai, H., Han, S.: Efficientvit-SAM: accelerated segment anything model without accuracy loss (2024)","DOI":"10.1109\/CVPRW63382.2024.00782"}],"container-title":["Lecture Notes in Computer Science","Medical Image Segmentation Foundation Models. 
CVPR 2024 Challenge: Segment Anything in Medical Images on Laptop"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-81854-7_1","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T09:23:48Z","timestamp":1757582628000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-81854-7_1"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031818530","9783031818547"],"references-count":27,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-81854-7_1","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"19 February 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"MedSAM on Laptop","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Medical Image Segmentation Challenge","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Seattle, WA","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 June 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 June 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"medsam2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.codabench.org\/competitions\/1847\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}
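The record above follows the Crossref REST API "work" schema, so it can be re-fetched and parsed programmatically. What follows is a minimal Python sketch, assuming the public endpoint at https://api.crossref.org is reachable and the requests library is installed; the field names used are taken directly from the record above.

# Minimal sketch: fetch the Crossref work record for this chapter and pull a
# few of the fields shown above. Assumes network access to api.crossref.org.
import requests

DOI = "10.1007/978-3-031-81854-7_1"

resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()

# The API wraps the record in an envelope: {"status": "ok", ..., "message": {...}}
work = resp.json()["message"]

title = work["title"][0]
authors = ", ".join(f'{a["given"]} {a["family"]}' for a in work.get("author", []))

print(title)
print(authors)
print(f'{work["references-count"]} references, cited {work["is-referenced-by-count"]} times')

Note that Crossref asks clients to identify themselves for its "polite" pool (for example via a mailto query parameter or User-Agent header); the sketch omits this for brevity.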