{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,3,18]],"date-time":"2026-03-18T21:21:55Z","timestamp":1773868915189,"version":"3.50.1"},"reference-count":16,"publisher":"Springer Science and Business Media LLC","issue":"1","license":[{"start":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T00:00:00Z","timestamp":1757548800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T00:00:00Z","timestamp":1757548800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["Int J CARS"],"DOI":"10.1007\/s11548-025-03517-8","type":"journal-article","created":{"date-parts":[[2025,9,11]],"date-time":"2025-09-11T11:01:18Z","timestamp":1757588478000},"page":"93-102","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":6,"title":["Ultrasam: a foundation model for ultrasound using large open-access segmentation datasets"],"prefix":"10.1007","volume":"21","author":[{"ORCID":"https:\/\/orcid.org\/0009-0000-8654-2270","authenticated-orcid":false,"given":"Adrien","family":"Meyer","sequence":"first","affiliation":[]},{"given":"Aditya","family":"Murali","sequence":"additional","affiliation":[]},{"given":"Farahdiba","family":"Zarin","sequence":"additional","affiliation":[]},{"given":"Didier","family":"Mutter","sequence":"additional","affiliation":[]},{"given":"Nicolas","family":"Padoy","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2025,9,11]]},"reference":[{"key":"3517_CR1","doi-asserted-by":"publisher","unstructured":"Tyagi A, Tyagi A, Kaur M, Aggarwal R, Soni KD, Sivaswamy J, Trikha A (2024) Nerve block target 
localization and needle guidance for autonomous robotic ultrasound guided regional anesthesia. In: 2024 IEEE\/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 5867\u20135872. https:\/\/doi.org\/10.1109\/IROS58592.2024.10801467","DOI":"10.1109\/IROS58592.2024.10801467"},{"key":"3517_CR2","unstructured":"Zhao Q, Lyu S, Bai W, Cai L, Liu B, Wu M, Sang X, Yang M, Chen L (2022) A multi-modality ovarian tumor ultrasound image dataset for unsupervised cross-domain semantic segmentation. CoRR arxiv:2207.06799"},{"key":"3517_CR3","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2024.103202","volume":"96","author":"J Jiao","year":"2024","unstructured":"Jiao J, Zhou J, Li X, Xia M, Huang Y, Huang L, Wang N, Zhang X, Zhou S, Wang Y, Guo Y (2024) USFM: a universal ultrasound foundation model generalized to tasks and organs towards label efficient image analysis. Med Image Anal 96:103202","journal-title":"Med Image Anal"},{"key":"3517_CR4","doi-asserted-by":"crossref","unstructured":"Kang Q, Gao J, Li K, Lao Q (2023) Deblurring masked autoencoder is better recipe for ultrasound image recognition. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 352\u2013362. Springer","DOI":"10.1007\/978-3-031-43907-0_34"},{"key":"3517_CR5","doi-asserted-by":"crossref","unstructured":"Lin X, Xiang Y, Yu L, Yan Z (2024) Beyond adapting sam: Towards end-to-end ultrasound image segmentation via auto prompting. In: International Conference on Medical Image Computing and Computer-Assisted Intervention, pp. 24\u201334. Springer","DOI":"10.1007\/978-3-031-72111-3_3"},{"key":"3517_CR6","doi-asserted-by":"publisher","first-page":"1005","DOI":"10.1109\/TMI.2024.3472672","volume":"44","author":"H Chen","year":"2024","unstructured":"Chen H, Cai Y, Wang C, Chen L, Zhang B, Han H, Guo Y, Ding H, Zhang Q (2024) Multi-organ foundation model for universal ultrasound image segmentation with task prompt and anatomical prior. 
IEEE Trans Med Imaging 44:1005\u20131018","journal-title":"IEEE Trans Med Imaging"},{"key":"3517_CR7","doi-asserted-by":"crossref","unstructured":"Ravishankar H, Patil R, Melapudi V, Annangi P (2023) Sonosam-segment anything on ultrasound images. In: International Workshop on Advances in Simplifying Medical Ultrasound, pp. 23\u201333. Springer","DOI":"10.1007\/978-3-031-44521-7_3"},{"key":"3517_CR8","unstructured":"Tu Z, Gu L, Wang X, Jiang B (2024) Ultrasound sam adapter: Adapting sam for breast lesion segmentation in ultrasound images. arXiv preprint arXiv:2404.14837"},{"issue":"1","key":"3517_CR9","doi-asserted-by":"publisher","first-page":"654","DOI":"10.1038\/s41467-024-44824-z","volume":"15","author":"J Ma","year":"2024","unstructured":"Ma J, He Y, Li F, Han L, You C, Wang B (2024) Segment anything in medical images. Nat Commun 15(1):654","journal-title":"Nat Commun"},{"key":"3517_CR10","doi-asserted-by":"publisher","DOI":"10.1016\/j.media.2025.103547","volume":"102","author":"J Wu","year":"2025","unstructured":"Wu J, Wang Z, Hong M, Ji W, Fu H, Xu Y, Xu M, Jin Y (2025) Medical sam adapter: Adapting segment anything model for medical image segmentation. Med Image Anal 102:103547. https:\/\/doi.org\/10.1016\/j.media.2025.103547","journal-title":"Med Image Anal"},{"issue":"4","key":"3517_CR11","doi-asserted-by":"publisher","first-page":"3110","DOI":"10.1002\/mp.16812","volume":"51","author":"W G\u00f3mez-Flores","year":"2024","unstructured":"G\u00f3mez-Flores W, Gregorio-Calas MJ, Pereira WCDA (2024) Bus-bra: a breast ultrasound dataset for assessing computer-aided diagnosis systems. 
Med Phys 51(4):3110\u20133123","journal-title":"Med Phys"},{"key":"3517_CR12","doi-asserted-by":"publisher","DOI":"10.1016\/j.compbiomed.2022.106424","volume":"152","author":"Q He","year":"2023","unstructured":"He Q, Bano S, Liu J, Liu W, Stoyanov D, Zuo S (2023) Query2: query over queries for improving gastrointestinal stromal tumour detection in an endoscopic ultrasound. Comput Biol Med 152:106424","journal-title":"Comput Biol Med"},{"key":"3517_CR13","doi-asserted-by":"crossref","unstructured":"Kirillov A, Mintun E, Ravin N, Mao H, Rolland C, Gustafson L, Xiao T, Whitehead S, Berg A, Lo W-Y, Dollar P, Girshick R (2023) Segment anything. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 4015\u20134026","DOI":"10.1109\/ICCV51070.2023.00371"},{"key":"3517_CR14","doi-asserted-by":"crossref","unstructured":"Cheng B, Misra I, Schwing AG, Kirillov A, Girdhar R (2022) Masked-attention mask transformer for universal image segmentation. CVPR","DOI":"10.1109\/CVPR52688.2022.00135"},{"key":"3517_CR15","unstructured":"Oquab M, Darcet T, Moutakanni T, Vo HV, Szafraniec M, Khalidov V, Fernandez P, HAZIZA D, Massa F, El-Nouby A, Assran M, Ballas N, Galuba W, Howes R, Huang P-Y, Li S-W, Misra I, Rabbat M, Sharma V, Synnaeve G, Xu H, Jegou H, Mairal J, Labatut P, Joulin A, Bojanowski P (2024) DINOv2: Learning robust visual features without supervision. Transactions on Machine Learning Research"},{"key":"3517_CR16","unstructured":"Chen K, Wang J, Pang J, Cao Y, Xiong Y, Li X, Sun S, Feng W, Liu Z, Xu J, Zhang Z, Cheng D, Zhu C, Cheng T, Zhao Q, Li B, Lu X, Zhu R, Wu Y, Dai J, Wang J, Shi J, Ouyang W, Loy CC, Lin D (2019) Mmdetection: open mmlab detection toolbox and benchmark. 
arXiv preprint arXiv:1906.07155"}],"container-title":["International Journal of Computer Assisted Radiology and Surgery"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11548-025-03517-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11548-025-03517-8","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11548-025-03517-8.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2026,2,23]],"date-time":"2026-02-23T20:03:47Z","timestamp":1771877027000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11548-025-03517-8"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,9,11]]},"references-count":16,"journal-issue":{"issue":"1","published-online":{"date-parts":[[2026,1]]}},"alternative-id":["3517"],"URL":"https:\/\/doi.org\/10.1007\/s11548-025-03517-8","relation":{},"ISSN":["1861-6429"],"issn-type":[{"value":"1861-6429","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,9,11]]},"assertion":[{"value":"12 June 2025","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"26 August 2025","order":2,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"11 September 2025","order":3,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no conflict of interest.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"No informed consent was 
required as the study did not involve human or animal participants.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Consent to Participate"}}]}}