{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,3]],"date-time":"2026-04-03T01:05:22Z","timestamp":1775178322451,"version":"3.50.1"},"publisher-location":"Cham","reference-count":32,"publisher":"Springer Nature Switzerland","isbn-type":[{"value":"9783031440120","type":"print"},{"value":"9783031440137","type":"electronic"}],"license":[{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2023,1,1]],"date-time":"2023-01-01T00:00:00Z","timestamp":1672531200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2023]]},"DOI":"10.1007\/978-3-031-44013-7_10","type":"book-chapter","created":{"date-parts":[[2023,9,15]],"date-time":"2023-09-15T23:02:39Z","timestamp":1694818959000},"page":"92-101","update-policy":"https:\/\/doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":10,"title":["Adapting Segment Anything Model (SAM) for\u00a0Retinal OCT"],"prefix":"10.1007","author":[{"given":"Botond","family":"Fazekas","sequence":"first","affiliation":[]},{"given":"Jos\u00e9","family":"Morano","sequence":"additional","affiliation":[]},{"given":"Dmitrii","family":"Lachinov","sequence":"additional","affiliation":[]},{"given":"Guilherme","family":"Aresta","sequence":"additional","affiliation":[]},{"given":"Hrvoje","family":"Bogunovi\u0107","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2023,9,16]]},"reference":[{"key":"10_CR1","unstructured":"Apostolopoulos, S., Ciller, C., Sznitman, R., De Zanet, S.: Simultaneous classification and segmentation 
of cysts in retinal oct. In: Proceedings of MICCAI Retinal OCT Fluid Challenge (RETOUCH), pp. 22\u201329 (2017)"},{"issue":"8","key":"10_CR2","doi-asserted-by":"publisher","first-page":"1858","DOI":"10.1109\/TMI.2019.2901398","volume":"38","author":"H Bogunovic","year":"2019","unstructured":"Bogunovic, H., et al.: RETOUCH: the retinal OCT fluid detection and segmentation benchmark and challenge. IEEE Trans. Med. Imaging 38(8), 1858\u20131874 (2019). https:\/\/doi.org\/10.1109\/TMI.2019.2901398","journal-title":"IEEE Trans. Med. Imaging"},{"issue":"10","key":"10_CR3","doi-asserted-by":"publisher","first-page":"S78","DOI":"10.1016\/j.ophtha.2016.04.056","volume":"123","author":"PA Campochiaro","year":"2016","unstructured":"Campochiaro, P.A., Aiello, L.P., Rosenfeld, P.J.: Anti-vascular endothelial growth factor agents in the treatment of retinal disease: from bench to bedside. Ophthalmology 123(10), S78\u2013S88 (2016). https:\/\/doi.org\/10.1016\/j.ophtha.2016.04.056","journal-title":"Ophthalmology"},{"key":"10_CR4","unstructured":"Chen, Q., et al.: Automatic segmentation of fluid-associated abnormalities and pigment epithelial detachment in retinal sd-oct images. In: Proceedings of MICCAI Retinal OCT Fluid Challenge (RETOUCH), pp. 15\u201321 (2017)"},{"key":"10_CR5","unstructured":"Deng, R., et al.: Segment anything model (SAM) for digital pathology: assess zero-shot segmentation on whole slide imaging. In: MIDL (2023)"},{"key":"10_CR6","unstructured":"Dosovitskiy, A., et al.: An image is worth 16x16 words: transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)"},{"key":"10_CR7","unstructured":"Gao, Y., Xia, W., Hu, D., Gao, X.: DeSAM: decoupling segment anything model for generalizable medical image segmentation, arxiv.org\/abs\/2306.00499 (2023)"},{"key":"10_CR8","unstructured":"Houlsby, N., et al.: Parameter-efficient transfer learning for nlp. In: International Conference on Machine Learning, pp. 2790\u20132799. 
PMLR (2019)"},{"key":"10_CR9","unstructured":"Hu, E.J., et al.: LoRA: low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685 (2021)"},{"key":"10_CR10","doi-asserted-by":"publisher","unstructured":"Hu, X., Xu, X., Shi, Y.: How to efficiently adapt large segmentation model (SAM) to medical image (2023). https:\/\/doi.org\/10.48550\/arxiv.2306.13731, arxiv.org\/abs\/2306.13731","DOI":"10.48550\/arxiv.2306.13731"},{"key":"10_CR11","unstructured":"Huang, Y., et al.: Segment Anything Model for Medical Images? arXiv preprint arXiv:2304.14660 (2023)"},{"key":"10_CR12","doi-asserted-by":"publisher","unstructured":"Isensee, F., Jaeger, P.F., Kohl, S.A.A., Petersen, J., Maier-Hein, K.H.: nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nat. Methods 18(2), 203\u2013211 (2021). https:\/\/doi.org\/10.1038\/s41592-020-01008-z, www.nature.com\/articles\/s41592-020-01008-z","DOI":"10.1038\/s41592-020-01008-z"},{"key":"10_CR13","doi-asserted-by":"publisher","unstructured":"Isensee, F., Jaeger, P.F., Kohl, S.A.A., Petersen, J., Maier-Hein, K.H.: nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. Nat. Methods 18(2), 203\u2013211 (2021). https:\/\/doi.org\/10.1038\/s41592-020-01008-z, www.nature.com\/articles\/s41592-020-01008-z","DOI":"10.1038\/s41592-020-01008-z"},{"key":"10_CR14","doi-asserted-by":"crossref","unstructured":"Ji, G.P., Fan, D.P., Xu, P., Cheng, M.M., Zhou, B., Gool, L.V.: SAM Struggles in Concealed Scenes - Empirical Study on \"Segment Anything\". arXiv preprint arXiv:2304.06022 (2023)","DOI":"10.1007\/s11432-023-3881-x"},{"key":"10_CR15","doi-asserted-by":"crossref","unstructured":"Ji, W., Li, J., Bi, Q., Liu, T., Li, W., Cheng, L.: Segment anything is not always perfect: an investigation of SAM on different real-world applications. 
arXiv preprint arXiv:2304.05750 (2023)","DOI":"10.1007\/s11633-024-1526-0"},{"key":"10_CR16","unstructured":"Kang, S.H., Park, H.S., Jang, J., Jeon, K.: Deep neural networks for the detection and segmentation of the retinal fluid in oct images. In: MICCAI Retinal OCT Fluid Challenge (RETOUCH) (2017)"},{"key":"10_CR17","doi-asserted-by":"publisher","unstructured":"Kirillov, A., et al.: Segment Anything. arXiv (2023). https:\/\/doi.org\/10.48550\/arxiv.2304.02643, arxiv.org\/abs\/2304.02643","DOI":"10.48550\/arxiv.2304.02643"},{"key":"10_CR18","doi-asserted-by":"publisher","unstructured":"Kurtzer, G.M., Sochat, V., Bauer, M.W.: Singularity: scientific containers for mobility of compute. PLOS ONE 12(5), e0177459 (2017) https:\/\/doi.org\/10.1371\/journal.pone.0177459, www.journals.plos.org\/plosone\/article?id=10.1371\/journal.pone.0177459","DOI":"10.1371\/journal.pone.0177459"},{"key":"10_CR19","doi-asserted-by":"publisher","unstructured":"Lei, W., Wei, X., Zhang, X., Li, K., Zhang, S.: MedLSAM: localize and segment anything model for 3D medical images (2023). https:\/\/doi.org\/10.48550\/arxiv.2306.14752, arxiv.org\/abs\/2306.14752","DOI":"10.48550\/arxiv.2306.14752"},{"key":"10_CR20","doi-asserted-by":"publisher","first-page":"100","DOI":"10.1016\/j.media.2019.02.011","volume":"54","author":"D Lu","year":"2019","unstructured":"Lu, D., et al.: Deep-learning based multiclass retinal fluid segmentation and detection in optical coherence tomography images using a fully convolutional neural network. Med. Image Anal. 54, 100\u2013110 (2019)","journal-title":"Med. Image Anal."},{"key":"10_CR21","doi-asserted-by":"crossref","unstructured":"Ma, J., Wang, B.: Segment anything in medical images. arXiv preprint arXiv:2304.12306 (2023)","DOI":"10.1038\/s41467-024-44824-z"},{"key":"10_CR22","doi-asserted-by":"publisher","unstructured":"Moor, M., et al.: Foundation models for generalist medical artificial intelligence. Nature 616(7956), 259\u2013265 (2023). 
https:\/\/doi.org\/10.1038\/s41586-023-05881-4, www.nature.com\/articles\/s41586-023-05881-4","DOI":"10.1038\/s41586-023-05881-4"},{"key":"10_CR23","unstructured":"Morley, D., Foroosh, H., Shaikh, S., Bagci, U.: Simultaneous detection and quantification of retinal fluid with deep learning. arXiv preprint arXiv:1708.05464 (2017)"},{"key":"10_CR24","doi-asserted-by":"crossref","unstructured":"Ndipenoch, N., Miron, A., Wang, Z., Li, Y.: nnUNet RASPP for Retinal OCT Fluid Detection, Segmentation and Generalisation over Variations of Data Sources. arXiv preprint arXiv:2302.13195 (2023)","DOI":"10.1109\/ACCESS.2024.3369913"},{"key":"10_CR25","unstructured":"Radford, A., et al.: Learning transferable visual models from natural language supervision. In: International Conference on Machine Learning, pp. 8748\u20138763. PMLR (2021)"},{"key":"10_CR26","unstructured":"Rashno, A., Koozekanani, D.D., Parhi, K.K.: Detection and segmentation of various types of fluids with graph shortest path and deep learning approaches. In: Proceedings of MICCAI Retinal OCT Fluid Challenge (RETOUCH), pp. 54\u201362 (2017)"},{"key":"10_CR27","unstructured":"Roy, S., et al.: SAM.MD: zero-shot medical image segmentation capabilities of the segment anything model. In: MIDL (2023)"},{"key":"10_CR28","doi-asserted-by":"publisher","unstructured":"Tennakoon, R., Gostar, A.K., Hoseinnezhad, R., Bab-Hadiashar, A.: Retinal fluid segmentation in OCT images using adversarial loss based convolutional neural networks. In: International Symposium on Biomedical Imaging (ISBI), pp. 1436\u20131440. IEEE Computer Society (May 2018). https:\/\/doi.org\/10.1109\/ISBI.2018.8363842","DOI":"10.1109\/ISBI.2018.8363842"},{"key":"10_CR29","unstructured":"Wu, J., et al.: Medical SAM adapter: adapting segment anything model for medical image segmentation. 
arXiv preprint arXiv:2304.12620 (2023)"},{"key":"10_CR30","unstructured":"Yadav, S., Gopinath, K., Sivaswamy, J.: A generalized motion pattern and fcn based approach for retinal fluid detection and segmentation. arXiv preprint arXiv:1712.01073 (2017)"},{"key":"10_CR31","doi-asserted-by":"publisher","unstructured":"Zhang, K., Liu, D.: Customized Segment Anything Model for medical image segmentation (2023). https:\/\/doi.org\/10.48550\/arxiv.2304.13785, arxiv.org\/abs\/2304.13785","DOI":"10.48550\/arxiv.2304.13785"},{"key":"10_CR32","doi-asserted-by":"publisher","unstructured":"Zhang, Y., Jiao, R.: How Segment Anything Model (SAM) boost medical image segmentation: a survey (2023). https:\/\/doi.org\/10.48550\/arxiv.2305.03678, arxiv.org\/abs\/2305.03678","DOI":"10.48550\/arxiv.2305.03678"}],"container-title":["Lecture Notes in Computer Science","Ophthalmic Medical Image Analysis"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-44013-7_10","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,28]],"date-time":"2024-10-28T05:29:10Z","timestamp":1730093350000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-44013-7_10"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023]]},"ISBN":["9783031440120","9783031440137"],"references-count":32,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-44013-7_10","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"value":"0302-9743","type":"print"},{"value":"1611-3349","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023]]},"assertion":[{"value":"16 September 2023","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"OMIA","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"International Workshop on Ophthalmic Medical Image Analysis","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Vancouver, BC","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Canada","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2023","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 October 2023","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"12 October 2023","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"10","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"omia2023","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/sites.google.com\/view\/omiax\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT System","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"27","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions 
Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"16","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"59% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}