{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,21]],"date-time":"2026-04-21T14:53:43Z","timestamp":1776783223474,"version":"3.51.2"},"publisher-location":"Cham","reference-count":17,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031818530","type":"print"},{"value":"9783031818547","type":"electronic"}],"license":[{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,1,1]],"date-time":"2025-01-01T00:00:00Z","timestamp":1735689600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2025]]},"DOI":"10.1007\/978-3-031-81854-7_9","type":"book-chapter","created":{"date-parts":[[2025,2,18]],"date-time":"2025-02-18T16:08:57Z","timestamp":1739894937000},"page":"137-150","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":3,"title":["Efficient Quantization-Aware Training on\u00a0Segment Anything Model in\u00a0Medical Images and\u00a0Its Deployment"],"prefix":"10.1007","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-3978-1492","authenticated-orcid":false,"given":"Haisheng","family":"Lu","sequence":"first","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0009-0008-8597-7166","authenticated-orcid":false,"given":"Yujie","family":"Fu","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-5032-6039","authenticated-orcid":false,"given":"Fan","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"https:\/\/orcid.org\/0000-0002-6930-8674","authenticated-orcid":false,"given":"Le","family":"Zhang","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,2,19]]},"reference":[{"key":"9_CR1","unstructured":"Hubara, I., Nahshan, Y., Hanani, Y., Banner, R., Soudry, D.: Accurate post training quantization with small calibration sets. In: International Conference on Machine Learning, pp. 4466\u20134475. PMLR (2021)"},{"key":"9_CR2","unstructured":"Jacob, B., et al.: Quantization and training of neural networks for efficient integer-arithmetic-only inference. CoRR abs\/1712.05877 (2017). http:\/\/arxiv.org\/abs\/1712.05877"},{"key":"9_CR3","doi-asserted-by":"crossref","unstructured":"Kirillov, A., et al.: Segment anything. In: Proceedings of the International Conference on Computer Vision, pp. 4015\u20134026 (2023)","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"9_CR4","unstructured":"Le, B.H., Nguyen-Vu, D.K., Nguyen-Mau, T.H., Nguyen, H.D., Tran, M.T.: MedficientSAM: a robust medical segmentation model with optimized inference pipeline for limited clinical settings. In: Submitted to CVPR 2024: Segment Anything in Medical Images on Laptop (2024). https:\/\/openreview.net\/forum?id=aa0f77RKI0. Under review"},{"key":"9_CR5","unstructured":"Li, Y., Xu, S., Zhang, B., Cao, X., Gao, P., Guo, G.: Q-ViT: accurate and fully quantized low-bit vision transformer. In: Advances in Neural Information Processing Systems, vol. 35, pp. 34451\u201334463 (2022)"},{"key":"9_CR6","doi-asserted-by":"crossref","unstructured":"Liu, Y., Yang, H., Dong, Z., Keutzer, K., Du, L., Zhang, S.: NoisyQuant: noisy bias-enhanced post-training activation quantization for vision transformers. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 20321\u201320330 (2023)","DOI":"10.1109\/CVPR52729.2023.01946"},{"key":"9_CR7","doi-asserted-by":"crossref","unstructured":"Liu, Z., Cheng, K.T., Huang, D., Xing, E.P., Shen, Z.: Nonuniform-to-uniform quantization: towards accurate quantization via generalized straight-through estimation. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 4942\u20134952 (2022)","DOI":"10.1109\/CVPR52688.2022.00489"},{"key":"9_CR8","unstructured":"Liu, Z., Wang, Y., Han, K., Zhang, W., Ma, S., Gao, W.: Post-training quantization for vision transformer. In: Advances in Neural Information Processing Systems, vol. 34, pp. 28092\u201328103 (2021)"},{"key":"9_CR9","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2021.102035","volume":"71","author":"J Ma","year":"2021","unstructured":"Ma, J., et al.: Loss odyssey in medical image segmentation. Med. Image Anal. 71, 102035 (2021)","journal-title":"Med. Image Anal."},{"issue":"1","key":"9_CR10","doi-asserted-by":"publisher","first-page":"654","DOI":"10.1038\/s41467-024-44824-z","volume":"15","author":"J Ma","year":"2024","unstructured":"Ma, J., He, Y., Li, F., Han, L., You, C., Wang, B.: Segment anything in medical images. Nat. Commun. 15(1), 654 (2024)","journal-title":"Nat. Commun."},{"key":"9_CR11","doi-asserted-by":"publisher","unstructured":"Pappalardo, A.: Xilinx\/brevitas. https:\/\/doi.org\/10.5281\/zenodo.3333552","DOI":"10.5281\/zenodo.3333552"},{"key":"9_CR12","doi-asserted-by":"crossref","unstructured":"Pfefferle, A.T., Purucker, L., Hutter, F.: DAFT: data-aware fine-tuning of foundation models for efficient and effective medical image segmentation. In: Submitted to CVPR 2024: Segment Anything In Medical Images On Laptop (2024). https:\/\/openreview.net\/forum?id=PObXviy706. Under review","DOI":"10.1007\/978-3-031-81854-7_2"},{"key":"9_CR13","doi-asserted-by":"crossref","unstructured":"Shen, M., et al.: Once quantization-aware training: high performance extremely low-bit architecture search. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 5340\u20135349 (2021)","DOI":"10.1109\/ICCV48922.2021.00529"},{"issue":"7","key":"9_CR14","doi-asserted-by":"publisher","DOI":"10.1016\/j.patter.2022.100543","volume":"3","author":"Z Xu","year":"2022","unstructured":"Xu, Z., et al.: Codabench: flexible, easy-to-use, and reproducible meta-benchmark platform. Patterns 3(7), 100543 (2022)","journal-title":"Patterns"},{"key":"9_CR15","unstructured":"Zhang, C., et al.: Faster segment anything: towards lightweight SAM for mobile applications. arXiv preprint arXiv:2306.14289 (2023)"},{"key":"9_CR16","unstructured":"Zhang, J., Zhou, Y., Saab, R.: Post-training quantization for neural networks with provable guarantees. CoRR abs\/2201.11113 (2022). https:\/\/arxiv.org\/abs\/2201.11113"},{"key":"9_CR17","doi-asserted-by":"crossref","unstructured":"Zhang, Z., Cai, H., Han, S.: EfficientViT-SAM: accelerated segment anything model without performance loss. In: CVPR Workshop: Efficient Large Vision Models (2024)","DOI":"10.1109\/CVPRW63382.2024.00782"}],"container-title":["Lecture Notes in Computer Science","Medical Image Segmentation Foundation Models. CVPR 2024 Challenge: Segment Anything in Medical Images on Laptop"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-81854-7_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T09:23:55Z","timestamp":1757582635000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-81854-7_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025]]},"ISBN":["9783031818530","9783031818547"],"references-count":17,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-81854-7_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025]]},"assertion":[{"value":"19 February 2025","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"The authors have no competing interests to declare that are relevant to the content of this article.","order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Disclosure of Interests"}},{"value":"MedSAM on Laptop","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Medical Image Segmentation Challenge","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Seattle, WA","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"USA","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2024","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 June 2024","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17 June 2024","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"1","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"medsam2024","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/www.codabench.org\/competitions\/1847\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}}]}}